#
# Architectures that offer a FUNCTION_TRACER implementation should
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool

config HAVE_FUNCTION_TRACER
	bool

config HAVE_FUNCTION_GRAPH_TRACER
	bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  This gets selected when the arch tests the function_trace_stop
	  variable at the mcount call site. Otherwise, this variable
	  is tested by the called function.

config HAVE_DYNAMIC_FTRACE
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool

config HAVE_HW_BRANCH_TRACER
	bool

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on STACKTRACE_SUPPORT

if TRACING_SUPPORT

menu "Tracers"

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If tracing is disabled at
	  runtime (the bootup default), the overhead of the instructions is
	  very small and not measurable even in micro-benchmarks.

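# Usage sketch (not part of the help text above): the function tracer is
# selected through the ftrace debugfs interface. This assumes debugfs is
# mounted at /debugfs, as in the help texts in this file:
#
#	echo function > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace | head
#	echo nop > /debugfs/tracing/current_tracer	# stop tracing again
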
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	default y
	help
	  Enable the kernel to trace a function at both its entry
	  and its return.
	  Its first purpose is to trace the duration of functions and
	  to draw a call graph for each thread, with some information
	  such as the return value.
	  This is done by storing the current return address in a stack
	  of calls kept in the current task structure.

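# Usage sketch (same debugfs mount assumption as above); the graph output
# shows entry/exit pairs and per-function durations:
#
#	echo function_graph > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace | head
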
config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select TRACING
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

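# Usage sketch (tracer and file names as referenced in the help text above):
#
#	echo irqsoff > /debugfs/tracing/current_tracer
#	echo 0 > /debugfs/tracing/tracing_max_latency	# reset the maximum
#	# ... let the workload run, then read the worst-case latency and trace:
#	cat /debugfs/tracing/tracing_max_latency
#	cat /debugfs/tracing/trace
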
config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select TRACING
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

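# Usage sketch (assumes the tracer registers under the name "preemptoff"):
#
#	echo preemptoff > /debugfs/tracing/current_tracer
#	echo 0 > /debugfs/tracing/tracing_max_latency
#	cat /debugfs/tracing/tracing_max_latency
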
config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select TRACING
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

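# Usage sketch (assumes the scheduling latency tracer registers under the
# name "wakeup"):
#
#	echo wakeup > /debugfs/tracing/current_tracer
#	echo 0 > /debugfs/tracing/tracing_max_latency
#	cat /debugfs/tracing/trace
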
config CONTEXT_SWITCH_TRACER
	bool "Trace process context switches"
	select TRACING
	select MARKERS
	help
	  This tracer gets called from the context switch and records
	  all switching of tasks.

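# Usage sketch (assumes the context switch tracer registers under the name
# "sched_switch"):
#
#	echo sched_switch > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace | head
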
config EVENT_TRACER
	bool "Trace various events in the kernel"
	select TRACING
	help
	  This tracer hooks to various trace points in the kernel,
	  allowing the user to pick and choose which trace points they
	  want to trace.

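# Usage sketch (assumes the event tracer exposes available_events and
# set_event files in the tracing debugfs directory, and that sched_wakeup
# is among the listed events):
#
#	cat /debugfs/tracing/available_events
#	echo sched_wakeup > /debugfs/tracing/set_event
#	cat /debugfs/tracing/trace_pipe
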
config BOOT_TRACER
	bool "Trace boot initcalls"
	select TRACING
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context-switches.

	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  You must pass ftrace=initcall on the kernel command line
	  to enable this tracer on bootup.

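# Usage sketch ("ftrace=initcall" comes from the help text above; the exact
# bootgraph.pl invocation is an assumption and may differ between versions):
#
#	# boot the kernel with:  ftrace=initcall
#	cat /debug/tracing/trace | perl scripts/bootgraph.pl > output.svg
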
config TRACE_BRANCH_PROFILING
	bool "Trace likely/unlikely profiler"
	select TRACING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /debugfs/tracing/profile_annotated_branch

	  Note: this will add significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

	  Say N if unsure.

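# Usage sketch (the output file is the one named in the help text above;
# profiling runs whenever the kernel runs, so the file can simply be read):
#
#	cat /debugfs/tracing/profile_annotated_branch | head
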
config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	depends on TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it was a hit or a miss.
	  The results will be displayed in:

	  /debugfs/tracing/profile_branch

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed.

	  Say N if unsure.

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

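# Usage sketch (assumes the tracer registers under the name "branch"):
#
#	echo branch > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace | head
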
config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select TRACING
	help
	  This tracer helps developers to analyze and optimize the kernel's
	  power management decisions, specifically the C-state and P-state
	  behavior.

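# Usage sketch (assumes the tracer registers under the name "power"):
#
#	echo power > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace
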
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in debugfs/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

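# Usage sketch (sysctl name and output file as given in the help text above):
#
#	sysctl kernel.stack_tracer_enabled=1
#	cat /debugfs/tracing/stack_trace
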
config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select TRACING
	help
	  This tracer records all branches on the system in a circular
	  buffer giving access to the last N branches for each cpu.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select TRACING
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/vm/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select TRACING
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread, such as the number of works
	  inserted and executed since its creation. It can help to
	  evaluate the amount of work each of them has to perform.
	  For example, it can help a developer decide whether to use
	  a per-cpu workqueue instead of a singlethreaded one.

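# Usage sketch (assumes the statistics are exported under trace_stat in the
# tracing debugfs directory; the exact file name is an assumption):
#
#	cat /debugfs/tracing/trace_stat/workqueues
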
config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select TRACING
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (it will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (stops all CPUs)
	  and modifies the code to jump over the call to ftrace.

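# Usage sketch (set_ftrace_filter is part of the dynamic ftrace debugfs
# interface and limits which functions get patched back into tracer calls):
#
#	echo 'schedule' > /debugfs/tracing/set_ftrace_filter
#	echo function > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace | head
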
config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on TRACING
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On
	  bootup, a series of tests is run to verify that each of the
	  configured tracers is functioning properly.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select TRACING
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/tracers/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

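# Usage sketch (run-time enabling as mentioned in the help text; assumes the
# tracer registers under the name "mmiotrace"):
#
#	echo mmiotrace > /debugfs/tracing/current_tracer
#	cat /debugfs/tracing/trace_pipe > mydump.txt &
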
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

endmenu

endif # TRACING_SUPPORT