/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

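/*
 * Illustrative sketch (not kernel API; "flatten_raw" and "dst" are
 * hypothetical): a consumer flattens a fragment chain by following
 * ->next until perf_raw_frag_last(), defined near the end of this
 * header, reports the terminal fragment:
 *
 *	static void flatten_raw(struct perf_raw_record *raw, void *dst)
 *	{
 *		struct perf_raw_frag *frag = &raw->frag;
 *
 *		do {
 *			if (frag->copy)
 *				frag->copy(dst, frag->data, frag->size);
 *			else
 *				memcpy(dst, frag->data, frag->size);
 *			dst += frag->size;
 *			if (perf_raw_frag_last(frag))
 *				break;
 *			frag = frag->next;
 *		} while (1);
 *	}
 */
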
/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			int			is_group_event;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
		struct { /* amd_power */
			u64			pwr_acc;
			u64			ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	/*
	 * If the event is a per-task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * The PMU stores its hardware address filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always stop the event as if by ->stop() with
	 *  PERF_EF_UPDATE. If it calls ->stop(), that must deal with already
	 *  being stopped without PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};

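/*
 * Illustrative sketch (all "my_*" names are hypothetical): the minimal
 * set of callbacks a driver fills in before handing the pmu to
 * perf_pmu_register(), declared later in this header:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing -1 as the type asks the core to allocate a dynamic PMU type;
 * my_event_init() should return -ENOENT for events that are not ours.
 */
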
/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct inode		*inode;
	unsigned long		offset;
	unsigned long		size;
	unsigned int		range	: 1,
				filter	: 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
};

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling on a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	unsigned long			*addr_filters_offs;
	unsigned long			addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	/* cgroup evts */
	void				*task_ctx_data;	/* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

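/*
 * Illustrative sketch (hypothetical caller): without a locked context,
 * pin the task's css with RCU before the lookup:
 *
 *	rcu_read_lock();
 *	cgrp = perf_cgroup_from_task(current, NULL);
 *	info = this_cpu_ptr(cgrp->info);
 *	rcu_read_unlock();
 *
 * Passing a ctx whose ->lock is held satisfies task_css_check() instead.
 */
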
#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
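
/*
 * Illustrative sketch (hypothetical "my_overflow_handler"): in-kernel
 * users fill a perf_event_attr and create the counter directly, much
 * like the hard-lockup detector does:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */
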
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)   |\
		     PERF_MEM_S(SNOOP, NA) |\
		     PERF_MEM_S(LOCK, NA)  |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

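/*
 * Illustrative sketch (hypothetical PMI handler; "my_pmu_stop" is not a
 * real symbol): drivers initialize the on-stack sample data on the
 * overflow path and let perf_event_overflow(), declared below, decide
 * whether the event must be stopped:
 *
 *	struct perf_sample_data data;
 *	struct hw_perf_event *hwc = &event->hw;
 *
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);
 */
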
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

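/*
 * Illustrative sketch: this is how, e.g., the page-fault path counts a
 * software event; the static key keeps the disabled case down to a
 * single patched branch:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */
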
/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;
		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}

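/*
 * Illustrative sketch (hypothetical unwinder "my_next_frame"): an
 * architecture's perf_callchain_kernel() stores the current IP and then
 * each frame, stopping as soon as perf_callchain_store() reports no
 * more room:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long ip;
 *
 *		if (perf_callchain_store(entry, instruction_pointer(regs)))
 *			return;
 *		while (my_next_frame(regs, &ip))
 *			if (perf_callchain_store(entry, ip))
 *				return;
 *	}
 */
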
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

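/*
 * Illustrative sketch (hypothetical ->addr_filters_sync() body;
 * "my_program_range" is not a real symbol): a PMU walks the list under
 * the head's lock and programs its range registers:
 *
 *	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 *	struct perf_addr_filter *filter;
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&ifh->lock, flags);
 *	list_for_each_entry(filter, &ifh->list, entry)
 *		my_program_range(filter->offset, filter->size, filter->range);
 *	raw_spin_unlock_irqrestore(&ifh->lock, flags);
 */
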
extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
		    bool truncated)					{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)

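/*
 * Illustrative sketch (hypothetical "my_*" names): a typical caller
 * defines a notifier function and registers it from its init path:
 *
 *	static int my_pmu_notifier(struct notifier_block *self,
 *				   unsigned long action, void *hcpu)
 *	{
 *		if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
 *			my_pmu_init_cpu((unsigned long)hcpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_notifier);
 */
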
struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

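/*
 * Illustrative sketch ("my_*" names are hypothetical): a driver exports
 * its config layout and named events through these helpers:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, my_attr_cycles, "event=0x11");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 */
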
#endif /* _LINUX_PERF_EVENT_H */