/*
 * Performance counters:
 *
 *    Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
	PERF_TYPE_HARDWARE		= 0,
	PERF_TYPE_SOFTWARE		= 1,
	PERF_TYPE_TRACEPOINT		= 2,

	/*
	 * Available TYPE space; raw is the max value.
	 */

	PERF_TYPE_RAW			= 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,
};
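
/*
 * Illustrative sketch (not part of this header): because PERF_TYPE_HARDWARE
 * is 0, a generalized hardware event can be requested by storing the event
 * id directly in hw_event.config, e.g.:
 *
 *	struct perf_counter_hw_event hw_event = { 0 };
 *
 *	hw_event.config = PERF_COUNT_INSTRUCTIONS;
 *	fd = sys_perf_counter_open(&hw_event, pid, cpu, -1, 0);
 *
 * The sys_perf_counter_open() argument order shown here is an assumption
 * based on the comment above; this header does not declare the syscall.
 */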

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow the profiling of
 * them as well):
 */
enum sw_event_ids {
	PERF_COUNT_CPU_CLOCK		= 0,
	PERF_COUNT_TASK_CLOCK		= 1,
	PERF_COUNT_PAGE_FAULTS		= 2,
	PERF_COUNT_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_CPU_MIGRATIONS	= 4,
	PERF_COUNT_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_PAGE_FAULTS_MAJ	= 6,

	PERF_SW_EVENTS_MAX		= 7,
};
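
/*
 * Illustrative sketch (assumption, not defined here): software events live
 * in the PERF_TYPE_SOFTWARE namespace, so the type must be packed into the
 * high bits of hw_event.config, using the TYPE shift defined below:
 *
 *	hw_event.config = ((__u64)PERF_TYPE_SOFTWARE << PERF_COUNTER_TYPE_SHIFT)
 *			| PERF_COUNT_PAGE_FAULTS;
 */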

#define __PERF_COUNTER_MASK(name)			\
	(((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<	\
	 PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS		1
#define PERF_COUNTER_RAW_SHIFT		63
#define PERF_COUNTER_RAW_MASK		__PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS	63
#define PERF_COUNTER_CONFIG_SHIFT	0
#define PERF_COUNTER_CONFIG_MASK	__PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS		7
#define PERF_COUNTER_TYPE_SHIFT		56
#define PERF_COUNTER_TYPE_MASK		__PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS		56
#define PERF_COUNTER_EVENT_SHIFT	0
#define PERF_COUNTER_EVENT_MASK		__PERF_COUNTER_MASK(EVENT)

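/*
 * Resulting layout of the config word (sketch added for clarity, derived
 * from the shift/mask values above):
 *
 *	bit  63    : raw flag -- the remaining 63 bits are raw CPU-specific
 *	             counter configuration
 *	bits 56-62 : event type (one of enum perf_event_types)
 *	bits  0-55 : event id within that type's namespace
 */
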
/*
 * Bits that can be set in hw_event.record_type to request information
 * in the overflow packets.
 */
enum perf_counter_record_format {
	PERF_RECORD_IP		= 1U << 0,
	PERF_RECORD_TID		= 1U << 1,
	PERF_RECORD_GROUP	= 1U << 2,
	PERF_RECORD_CALLCHAIN	= 1U << 3,
	PERF_RECORD_TIME	= 1U << 4,
};
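
/*
 * Illustrative sketch (assumption): record bits are OR-ed together, and each
 * set bit adds the corresponding field to every overflow packet, e.g.:
 *
 *	hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
 *
 * See the PERF_EVENT_COUNTER_OVERFLOW layout further down for the resulting
 * packet format.
 */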

/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 2,
};
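
/*
 * Illustrative sketch (derived from the comment above): with both format
 * bits set, a read() on the counter fd returns three u64 values:
 *
 *	u64 buf[3];
 *
 *	read(fd, buf, sizeof(buf));
 *	count        = buf[0];
 *	time_enabled = buf[1];	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	time_running = buf[2];	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 */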

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	/*
	 * The MSB of the config word signifies if the rest contains cpu
	 * specific (raw) counter configuration data; if unset, the next
	 * 7 bits are an event type and the rest of the bits are the event
	 * identifier.
	 */
	__u64			config;

	__u64			irq_period;
	__u32			record_type;
	__u32			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				munmap         :  1, /* include munmap data   */

				__reserved_1   : 53;

	__u32			extra_config_len;
	__u32			wakeup_events;	/* wakeup every n events */

	__u64			__reserved_2;
	__u64			__reserved_3;
};
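
/*
 * Illustrative sketch (assumption, not part of this header): a minimal
 * user-space setup for a disabled, inherited instruction counter, with the
 * syscall prototype assumed as in the earlier example:
 *
 *	struct perf_counter_hw_event hw_event;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	hw_event.config      = PERF_COUNT_INSTRUCTIONS;
 *	hw_event.irq_period  = 100000;		(sample every 100k events)
 *	hw_event.record_type = PERF_RECORD_IP;
 *	hw_event.disabled    = 1;		(start it later via ioctl)
 *	hw_event.inherit     = 1;
 *
 *	fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
 */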

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)

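/*
 * Illustrative sketch (assumption): the ioctls act on the fd returned by
 * sys_perf_counter_open(), e.g. to bracket a workload with a counter that
 * was opened with hw_event.disabled = 1:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	...workload...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 */
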
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier();
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space should issue an rmb() after reading this value, on
	 * SMP-capable platforms -- see perf_counter_wakeup().
	 */
	__u32	data_head;		/* head in the data section */
};
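
/*
 * Illustrative sketch (assumption, wrap-around handling omitted): consuming
 * events from the mmap()ed data buffer while honouring the rmb()
 * requirement described above:
 *
 *	struct perf_counter_mmap_page *pc = mmap_base;
 *	__u32 head = pc->data_head;
 *
 *	rmb();
 *	while (tail != head) {
 *		struct perf_event_header *hdr = data + tail;
 *		...process one event...
 *		tail += hdr->size;
 *	}
 */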

struct perf_event_header {
	__u32	type;
	__u32	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,
	PERF_EVENT_MUNMAP		= 2,

	/*
	 * Half the event type space is reserved for the counter overflow
	 * bitfields, as found in hw_event.record_type.
	 *
	 * These events will have types of the form:
	 *	PERF_EVENT_COUNTER_OVERFLOW { | __PERF_EVENT_* } *
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && __PERF_EVENT_IP
	 *	{ u32			pid, tid; } && __PERF_EVENT_TID
	 *
	 *	{ u64			nr;
	 *	  { u64 event, val; }	cnt[nr];  } && __PERF_EVENT_GROUP
	 *
	 *	{ u16			nr,
	 *				hv,
	 *				kernel,
	 *				user;
	 *	  u64			ips[nr];  } && __PERF_EVENT_CALLCHAIN
	 *
	 *	{ u64			time;	  } && __PERF_EVENT_TIME
	 * };
	 */
	PERF_EVENT_COUNTER_OVERFLOW	= 1UL << 31,
	__PERF_EVENT_IP			= PERF_RECORD_IP,
	__PERF_EVENT_TID		= PERF_RECORD_TID,
	__PERF_EVENT_GROUP		= PERF_RECORD_GROUP,
	__PERF_EVENT_CALLCHAIN		= PERF_RECORD_CALLCHAIN,
	__PERF_EVENT_TIME		= PERF_RECORD_TIME,
};
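
/*
 * Illustrative sketch (derived from the layout above): an overflow event
 * carrying an instruction pointer and a pid/tid pair shows up with
 *
 *	header.type == (PERF_EVENT_COUNTER_OVERFLOW |
 *			__PERF_EVENT_IP | __PERF_EVENT_TID)
 *
 * so a parser can test the low bits of header.type to decide which of the
 * optional fields follow the header.
 */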

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
	return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
		PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
	return hw_event->config & PERF_COUNTER_EVENT_MASK;
}
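
/*
 * Illustrative sketch (derived from the helpers above): decoding the config
 * packed in the earlier software-counter example yields
 *
 *	perf_event_raw(&hw_event)	== 0
 *	perf_event_type(&hw_event)	== PERF_TYPE_SOFTWARE
 *	perf_event_id(&hw_event)	== PERF_COUNT_PAGE_FAULTS
 */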

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64				config;
			unsigned long			config_base;
			unsigned long			counter_base;
			int				nmi;
			unsigned int			idx;
		};
		union { /* software */
			atomic64_t			count;
			struct hrtimer			hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				irq_period;
	atomic64_t			period_left;
#endif
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
};

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;
	atomic_t			wakeup;
	atomic_t			head;
	atomic_t			events;
	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;
509 | ||
510 | #ifdef CONFIG_PERF_COUNTERS | |
5c92d124 | 511 | extern const struct hw_perf_counter_ops * |
621a01ea IM |
512 | hw_perf_counter_init(struct perf_counter *counter); |
513 | ||
0793a61d TG |
514 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); |
515 | extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); | |
516 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | |
9b51f66d IM |
517 | extern void perf_counter_init_task(struct task_struct *child); |
518 | extern void perf_counter_exit_task(struct task_struct *child); | |
925d519a | 519 | extern void perf_counter_do_pending(void); |
0793a61d | 520 | extern void perf_counter_print_debug(void); |
1b023a96 | 521 | extern void perf_counter_unthrottle(void); |
01b2838c IM |
522 | extern u64 hw_perf_save_disable(void); |
523 | extern void hw_perf_restore(u64 ctrl); | |
1d1c7ddb IM |
524 | extern int perf_counter_task_disable(void); |
525 | extern int perf_counter_task_enable(void); | |
3cbed429 PM |
526 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, |
527 | struct perf_cpu_context *cpuctx, | |
528 | struct perf_counter_context *ctx, int cpu); | |
37d81828 | 529 | extern void perf_counter_update_userpage(struct perf_counter *counter); |
5c92d124 | 530 | |
f6c7d5fe PZ |
531 | extern int perf_counter_overflow(struct perf_counter *counter, |
532 | int nmi, struct pt_regs *regs); | |
3b6f9e5c PM |
533 | /* |
534 | * Return 1 for a software counter, 0 for a hardware counter | |
535 | */ | |
536 | static inline int is_software_counter(struct perf_counter *counter) | |
537 | { | |
f4a2deb4 PZ |
538 | return !perf_event_raw(&counter->hw_event) && |
539 | perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE; | |
3b6f9e5c PM |
540 | } |
541 | ||
b8e83514 | 542 | extern void perf_swcounter_event(u32, u64, int, struct pt_regs *); |
15dbf27c | 543 | |
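/*
 * Illustrative sketch (assumption, parameter names taken from the stub
 * further down): kernel code reports one software event occurrence, here a
 * page fault observed from non-NMI context, like so:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
 */
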
extern void perf_counter_mmap(unsigned long addr, unsigned long len,
			      unsigned long pgoff, struct file *file);

extern void perf_counter_munmap(unsigned long addr, unsigned long len,
				unsigned long pgoff, struct file *file);

#define MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	u16	nr, hv, kernel, user;
	u64	ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)	{ }

static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
		  unsigned long pgoff, struct file *file)		{ }

static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
		    unsigned long pgoff, struct file *file)		{ }

#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */