/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static notrace inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}

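/*
 * Layout assumed by the checks below: each per-cpu trace_array_cpu
 * keeps its entries in a circular list of pages (data->trace_pages),
 * with ENTRIES_PER_PAGE fixed-size struct trace_entry records per
 * page.  trace_test_buffer_cpu() walks that list via page->lru and
 * verifies that every recorded entry has a known type.
 */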
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

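/*
 * The selftests below all follow the same pattern: start the tracer,
 * generate some activity (sleep, disable irqs or preemption, wake a
 * task), stop the tracer, then call trace_test_buffer() to validate
 * the buffer and report how many entries were recorded.
 */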
#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)
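/*
 * The extra __STR() level makes the preprocessor expand
 * DYN_FTRACE_TEST_NAME before stringifying it, so
 * STR(DYN_FTRACE_TEST_NAME) yields "trace_selftest_dynamic_test_func"
 * rather than "DYN_FTRACE_TEST_NAME".  That string is what gets passed
 * to ftrace_set_filter() below.
 */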
static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}

/* Test dynamic code modification and ftrace filters */
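/*
 * Two phases: with the filter set to DYN_FTRACE_TEST_NAME, 100ms of
 * normal activity must record nothing (everything else is filtered
 * out), and one call to the passed-in test function must then record
 * exactly one entry.
 */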
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

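/*
 * The latency tracers exercised below record their worst-case trace
 * in max_tr, a separate snapshot of the per-cpu buffers, which is why
 * these tests check both the live buffer and max_tr.
 */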
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
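/*
 * The wakeup tracer measures the latency from waking the highest
 * priority task to that task actually getting the CPU.  The test
 * creates an RT kthread, lets it go to sleep, wakes it, and expects
 * the recorded max-latency trace to cover that wakeup.
 */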
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, doesn't need to be too high */

	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
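/*
 * The context switch tracer records scheduling switches, so simply
 * sleeping for 100ms (which itself schedules) should be enough to
 * leave entries in the buffer.
 */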
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */