/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	PERF_SW_EVENTS_MIN		= -6,
};
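
/*
 * Illustrative sketch, not part of the ABI: positive type values select
 * one of the generalized hardware events above, negative values select
 * a kernel software counter. The helper below is hypothetical and kept
 * guarded out; the kernel's own check is is_software_counter() further
 * down in this file.
 */
#if 0
static inline int hw_event_type_is_software(__s64 type)
{
	/* software counters occupy the range (PERF_SW_EVENTS_MIN, 0) */
	return type < 0 && type > PERF_SW_EVENTS_MIN;
}
#endif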

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	__s64			type;

	__u64			irq_period;
	__u64			record_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				nmi	       :  1, /* NMI sampling          */
				raw	       :  1, /* raw event type        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */

				__reserved_1   : 54;

	__u32			extra_config_len;
	__u32			__reserved_4;

	__u64			__reserved_2;
	__u64			__reserved_3;
};
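
/*
 * Illustrative sketch, not part of the ABI: a typical user-space
 * initialization of the structure above, counting user-space
 * instructions with the counter created disabled. The chosen field
 * values are hypothetical; bitfields left out default to 0.
 */
#if 0
struct perf_counter_hw_event hw_event = {
	.type		= PERF_COUNT_INSTRUCTIONS,
	.irq_period	= 0,			/* no IRQ sampling */
	.record_type	= PERF_RECORD_SIMPLE,
	.disabled	= 1,			/* start off, enable via ioctl */
	.exclude_kernel	= 1,			/* count user space only */
};
#endif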

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO('$', 1)

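/*
 * Illustrative sketch, not part of the ABI: toggling a counter that was
 * opened with hw_event.disabled = 1. The fd is assumed to come from
 * sys_perf_counter_open(); the syscall invocation itself is omitted
 * since its number and wrapper vary by architecture and kernel version,
 * and run_workload() is a hypothetical stand-in.
 */
#if 0
void measure(int counter_fd)
{
	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);
	run_workload();
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);
	/* the accumulated count is then obtained with read() on the fd */
}
#endif
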
#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	atomic64_t		prev_count;
	u64			irq_period;
	atomic64_t		period_left;
#endif
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int (*enable)		(struct perf_counter *counter);
	void (*disable)		(struct perf_counter *counter);
	void (*read)		(struct perf_counter *counter);
};
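
/*
 * Illustrative sketch, not part of this header: the shape of a
 * hw_perf_counter_ops implementation, loosely modeled on the software
 * counters. All names and bodies below are hypothetical.
 */
#if 0
static int hypothetical_counter_enable(struct perf_counter *counter)
{
	/* program the event into hardware (or start a sw clock source) */
	return 0;
}

static void hypothetical_counter_disable(struct perf_counter *counter)
{
	/* stop the event and fold the outstanding delta into ->count */
}

static void hypothetical_counter_read(struct perf_counter *counter)
{
	/* refresh ->count from the underlying counter */
}

static const struct hw_perf_counter_ops hypothetical_counter_ops = {
	.enable		= hypothetical_counter_enable,
	.disable	= hypothetical_counter_disable,
	.read		= hypothetical_counter_read,
};
#endif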

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	enum perf_counter_active_state	prev_state;
	atomic64_t			count;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	struct list_head		child_list;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	struct task_struct	*task;
#endif
};
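
/*
 * Illustrative sketch, not part of this header: the locking rule
 * documented above means a reader may hold either ctx->mutex or
 * ctx->lock, while anyone modifying counter_list must hold both.
 * A hypothetical list update would therefore look like this:
 */
#if 0
static void example_add_counter(struct perf_counter_context *ctx,
				struct perf_counter *counter)
{
	mutex_lock(&ctx->mutex);
	spin_lock_irq(&ctx->lock);	/* writers take both locks */
	list_add_tail(&counter->list_entry, &ctx->counter_list);
	ctx->nr_counters++;
	spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}
#endif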

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx, int cpu);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !counter->hw_event.raw && counter->hw_event.type < 0;
}
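
/*
 * For example: a non-raw counter with type PERF_COUNT_TASK_CLOCK (-2)
 * is a software counter; PERF_COUNT_CPU_CYCLES (0) or any raw event
 * is not.
 */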

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_counter_unthrottle(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */