/*
 * Performance counters:
 *
 *  Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * available TYPE space; raw is the max value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the event id
 * portion of attr.config in the sys_perf_counter_open() syscall:
 */
enum attr_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow profiling them as
 * well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};

#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)

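/*
 * Example (illustrative sketch, not part of the ABI proper): with the masks
 * above, a generalized hardware event encodes its type in bits 56-62 and its
 * event id in bits 0-55:
 *
 *	__u64 config = ((__u64)PERF_TYPE_HARDWARE <<
 *			PERF_COUNTER_TYPE_SHIFT) | PERF_COUNT_INSTRUCTIONS;
 *
 * A raw event instead sets the MSB (PERF_COUNTER_RAW_MASK), with the
 * remaining 63 bits carrying the cpu-specific configuration.
 */
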
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_counter_sample_format {
	PERF_SAMPLE_IP			= 1U << 0,
	PERF_SAMPLE_TID			= 1U << 1,
	PERF_SAMPLE_TIME		= 1U << 2,
	PERF_SAMPLE_ADDR		= 1U << 3,
	PERF_SAMPLE_GROUP		= 1U << 4,
	PERF_SAMPLE_CALLCHAIN		= 1U << 5,
	PERF_SAMPLE_ID			= 1U << 6,
	PERF_SAMPLE_CPU			= 1U << 7,
	PERF_SAMPLE_PERIOD		= 1U << 8,
};

/*
 * Bits that can be set in attr.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_attr {
	/*
	 * The MSB of the config word signifies if the rest contains cpu
	 * specific (raw) counter configuration data; if unset, the next
	 * 7 bits are an event type and the rest of the bits are the event
	 * identifier.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */

				__reserved_1   : 53;

	__u32			wakeup_events;	/* wakeup every n events */
	__u32			__reserved_2;

	__u64			__reserved_3;
	__u64			__reserved_4;
};
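
/*
 * Usage sketch: user-space zero-fills the attr and sets only what it needs
 * before opening a counter fd. The sys_perf_counter_open() argument list
 * shown here (attr, pid, cpu, group_fd, flags) is assumed, since this
 * header does not declare the syscall itself:
 *
 *	struct perf_counter_attr attr = { };
 *
 *	attr.config        = ((__u64)PERF_TYPE_HARDWARE <<
 *			      PERF_COUNTER_TYPE_SHIFT) |
 *			     PERF_COUNT_INSTRUCTIONS;
 *	attr.sample_period = 100000;
 *	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 *	attr.disabled      = 1;
 *
 *	fd = sys_perf_counter_open(&attr, pid, cpu, group_fd, flags);
 */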

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD		_IOW('$', 4, u64)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
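
/*
 * Sketch: passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the
 * operation to the whole group led by the fd's counter:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */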

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier();
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * On SMP-capable platforms, user-space should issue an rmb() after
	 * reading this value -- see perf_counter_wakeup().
	 */
	__u64	data_head;		/* head in the data section */
};
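
/*
 * Read-side sketch for data_head, per the comment above (rmb() stands for
 * the architecture's read memory barrier):
 *
 *	head = page->data_head;
 *	rmb();
 *	... consume records in [old_head, head) ...
 */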

#define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_EVENT_MISC_KERNEL		(1 << 0)
#define PERF_EVENT_MISC_USER		(2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_EVENT_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				sample_period;
	 * };
	 */
	PERF_EVENT_PERIOD		= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 * };
	 */
	PERF_EVENT_THROTTLE		= 5,
	PERF_EVENT_UNTHROTTLE		= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 * };
	 */
	PERF_EVENT_FORK			= 7,

	/*
	 * When header.misc & PERF_EVENT_MISC_OVERFLOW is set, the event_type
	 * field holds a combination of the PERF_RECORD_* bits:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_RECORD_IP
	 *	{ u32			pid, tid; } && PERF_RECORD_TID
	 *	{ u64			time;     } && PERF_RECORD_TIME
	 *	{ u64			addr;     } && PERF_RECORD_ADDR
	 *	{ u64			config;   } && PERF_RECORD_CONFIG
	 *	{ u32			cpu, res; } && PERF_RECORD_CPU
	 *
	 *	{ u64			nr;
	 *	  { u64 id, val; }	cnt[nr];  } && PERF_RECORD_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
	 * };
	 */
};
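
/*
 * Parsing sketch (an assumption about user-space usage, not ABI): every
 * record in the mmap data area starts with a perf_event_header whose size
 * field covers the whole record, so a reader can walk the stream with:
 *
 *	struct perf_event_header *hdr;
 *
 *	while (offset < head) {
 *		hdr = (struct perf_event_header *)(base + offset);
 *		... dispatch on hdr->type ...
 *		offset += hdr->size;
 *	}
 */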

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_attr *attr)
{
	return attr->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_attr *attr)
{
	return attr->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_attr *attr)
{
	return (attr->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_attr *attr)
{
	return attr->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				sample_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)		(struct perf_counter *counter);
	void (*disable)		(struct perf_counter *counter);
	void (*read)		(struct perf_counter *counter);
	void (*unthrottle)	(struct perf_counter *counter);
};

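/*
 * Sketch of a hypothetical (not in-tree) pmu implementation, of the kind
 * hw_perf_counter_init() would return a pointer to:
 *
 *	static int  dummy_enable(struct perf_counter *counter)	{ return 0; }
 *	static void dummy_disable(struct perf_counter *counter)	{ }
 *	static void dummy_read(struct perf_counter *counter)	{ }
 *
 *	static const struct pmu dummy_pmu = {
 *		.enable		= dummy_enable,
 *		.disable	= dummy_disable,
 *		.read		= dummy_read,
 *	};
 */
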
/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;	/* nr of data pages  */
	int				nr_locked;	/* nr pages mlocked  */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event limit       */

	atomic_long_t			head;		/* write position    */
	atomic_long_t			done_head;	/* completed head    */

	atomic_t			lock;		/* concurrent writes */

	atomic_t			wakeup;		/* needs a wakeup    */

	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct pmu		*pmu;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_attr	attr;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate the total time (in nanoseconds) that child
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_counter		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	atomic_t		refcount;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_counter_context *parent_ctx;
	u64			parent_gen;
	u64			generation;
	int			pin_count;
	struct rcu_head		rcu_head;
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
				  struct perf_cpu_context *cpuctx,
				  struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
				 int nmi, struct pt_regs *regs, u64 addr);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !perf_event_raw(&counter->attr) &&
		perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

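/*
 * Sketch: kernel code drives the software counters through this hook; the
 * arguments are (event, nr, nmi, regs, addr), as in the
 * !CONFIG_PERF_COUNTERS stub below, e.g. from a fault path:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
 */
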
extern void __perf_counter_mmap(struct vm_area_struct *vma);

static inline void perf_counter_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_counter_mmap(vma);
}

extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);

extern void perf_counter_task_migration(struct task_struct *task, int cpu);

#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_priv;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_limit;

extern void perf_counter_init(void);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
				 PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_free_task(struct task_struct *task)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
static inline void perf_counter_comm(struct task_struct *tsk)		{ }
static inline void perf_counter_fork(struct task_struct *tsk)		{ }
static inline void perf_counter_init(void)				{ }
static inline void perf_counter_task_migration(struct task_struct *task,
					       int cpu)			{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */