#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-design.txt

config HAVE_C_RECORDMCOUNT
	bool
	help
	  C version of recordmcount available?

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config EVENT_POWER_TRACING_DEPRECATED
	depends on EVENT_TRACING
	bool "Deprecated power event trace API, to be removed"
	default y
	help
	  Provides old power event types:
	  C-state/idle accounting events:
	  power:power_start
	  power:power_end
	  and old cpufreq accounting event:
	  power:power_frequency
	  This is for userspace compatibility
	  and will vanish after 5 kernel iterations,
	  namely 3.1.

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	  Allow the use of ring_buffer_swap_cpu.
	  Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. Options that are enabled
# by all tracers (context switch and event tracer) select TRACING instead.
# This allows those options to appear when no other tracer is selected, and
# to stay hidden when something else selects them. The two options
# GENERIC_TRACER and TRACING are needed to accomplish this hiding of the
# automatic options without creating circular dependencies.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway; they were tested to build and work. Note that new
	# exceptions to this list are not welcome; better to implement
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.
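
	  For example, assuming debugfs is mounted at /sys/kernel/debug,
	  the tracer can be selected and inspected at runtime with:

	    echo function > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace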

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry and its
	  return. Its main purpose is to measure the duration of functions
	  and to draw a call graph for each thread, with some information
	  such as the return value. This is done by storing the current
	  return address in a stack of calls kept on the current task
	  structure.
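
	  As with the function tracer, it is selected at runtime through
	  the same debugfs interface (mount point assumed):

	    echo function_graph > /sys/kernel/debug/tracing/current_tracer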

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on !ARCH_USES_GETTIMEOFFSET
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	    echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
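
	  For example, assuming the usual debugfs mount point, the tracer
	  can be selected and the measured maximum read back with:

	    echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency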

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on !ARCH_USES_GETTIMEOFFSET
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be (re-)started at runtime via:

	    echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)
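
	  Usage mirrors the irqs-off tracer; assuming the usual debugfs
	  mount point:

	    echo preemptoff > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency

	  When the irqs-off tracer is enabled as well, a combined
	  "preemptirqsoff" tracer is also available.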

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
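
	  The tracer provided by this option appears as "wakeup" in the
	  tracer list; assuming the usual debugfs mount point it can be
	  selected with:

	    echo wakeup > /sys/kernel/debug/tracing/current_tracer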

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.
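
	  Individual events can then be toggled through debugfs (usual
	  mount point assumed), for example:

	    echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	    cat /sys/kernel/debug/tracing/trace_pipe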

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
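
	  The resulting events appear under the "syscalls" subsystem in
	  debugfs (usual mount point assumed), e.g.:

	    echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable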

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It adds hooks into
	  the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if-statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	    /sys/kernel/debug/tracing/trace_stat/branch_annotated

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  condition in the kernel is recorded, whether it was taken or
	  not. The results will be displayed in:

	    /sys/kernel/debug/tracing/trace_stat/branch_all

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose significant
	  overhead on the system. It should only be enabled when the
	  system is to be analyzed in much detail.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likely()s and unlikely()s are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.
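
	  The tracer registers itself as "branch"; assuming the usual
	  debugfs mount point it can be selected with:

	    echo branch > /sys/kernel/debug/tracing/current_tracer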

	  Say N if unsure.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
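
	  For example (usual debugfs mount point assumed):

	    echo 1 > /proc/sys/kernel/stack_tracer_enabled
	    cat /sys/kernel/debug/tracing/stack_trace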

	  Say N if unsure.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block IO actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	    git://git.kernel.dk/blktrace.git

	  Tracing also is possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config KPROBE_EVENT
	depends on KPROBES
	depends on HAVE_REGS_AND_STACK_ACCESS_API
	bool "Enable kprobes-based dynamic events"
	select TRACING
	default y
	help
	  This allows the user to add tracing events (similar to tracepoints)
	  on the fly via the ftrace interface. See
	  Documentation/trace/kprobetrace.txt for more details.

	  Those events can be inserted wherever kprobes can probe, and record
	  various register and memory values.

	  This option is also required by the perf-probe subcommand of perf
	  tools. If you want to use perf tools, this option is strongly
	  recommended.
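
	  A minimal example, using a hypothetical probe name "myprobe" and
	  the usual debugfs mount point:

	    echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
	    echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable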

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (it will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (stops all CPUs)
	  and modifies the code to jump over the call to ftrace.
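
	  Dynamic ftrace also provides function filtering; for example,
	  with the usual debugfs mount point and an illustrative glob:

	    echo 'vfs_*' > /sys/kernel/debug/tracing/set_ftrace_filter
	    echo function > /sys/kernel/debug/tracing/current_tracer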

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins, and when a
	  zero is entered, profiling stops. A "functions" file is created in
	  the trace_stat directory; this file shows the list of functions that
	  have been hit and their counters.
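
	  For example (usual debugfs mount point assumed):

	    echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	    cat /sys/kernel/debug/tracing/trace_stat/function*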

	  If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, a series of tests is run to verify that the tracer is
	  functioning properly. It will run tests on all the configured
	  tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	  This option also enables testing of every syscall event. For each
	  event it enables the event, runs various loads with the event
	  enabled, and then disables it again. This adds a bit more time to
	  kernel boot-up since it is done for every system call defined.

	  TBD - enable a way to actually call the syscalls as we test their
	  events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
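
	  At run-time the tracer is selected like any other (usual debugfs
	  mount point assumed):

	    echo mmiotrace > /sys/kernel/debug/tracing/current_tracer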

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on, e.g., an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.
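
	  When built as a module (assumed to be named ring_buffer_benchmark),
	  the stress test is started simply by loading it:

	    modprobe ring_buffer_benchmark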

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT