/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};
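
/*
 * Illustrative sketch (not part of the API proper): a raw record is a
 * chain of fragments terminated per perf_raw_frag_last() (defined later
 * in this header).  A consumer could sum the fragment sizes like so;
 * raw_frag_bytes() is a hypothetical name:
 *
 *	static u32 raw_frag_bytes(const struct perf_raw_record *raw)
 *	{
 *		const struct perf_raw_frag *frag = &raw->frag;
 *		u32 sum = 0;
 *
 *		do {
 *			sum += frag->size;
 *			if (perf_raw_frag_last(frag))
 *				break;
 *			frag = frag->next;
 *		} while (1);
 *
 *		return sum;
 *	}
 */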

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
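
/*
 * Illustrative sketch: consuming a branch stack in sample order, most
 * recent branch first (fields from the uapi struct perf_branch_entry):
 *
 *	static void print_branches(const struct perf_branch_stack *bs)
 *	{
 *		u64 i;
 *
 *		for (i = 0; i < bs->nr; i++)
 *			pr_debug("branch %llu: 0x%llx -> 0x%llx\n",
 *				 i, bs->entries[i].from, bs->entries[i].to);
 *	}
 */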

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event; this includes any counter constraint
	 * scheduling, etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 *  as ->add() followed by ->stop().
	 *
	 * ->del() must always stop the event as if PERF_EF_UPDATE were set.
	 *  If it calls ->stop(), that ->stop() must deal with already being
	 *  stopped without PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, and must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event;
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};
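
/*
 * Illustrative sketch (hypothetical "dummy" driver, not a real PMU): a
 * minimal pmu wires up event_init plus the add/del/start/stop/read
 * callbacks and keeps hw_perf_event::state consistent with the
 * PERF_HES_* flags described above:
 *
 *	static void dummy_start(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = 0;
 *	}
 *
 *	static void dummy_stop(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *	}
 *
 *	static void dummy_read(struct perf_event *event)
 *	{
 *		local64_add(1, &event->count);	// stand-in for a HW read
 *	}
 *
 *	static int dummy_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED;
 *		if (flags & PERF_EF_START)
 *			dummy_start(event, flags);
 *		return 0;
 *	}
 *
 *	static void dummy_del(struct perf_event *event, int flags)
 *	{
 *		dummy_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static int dummy_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static struct pmu dummy_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= dummy_event_init,
 *		.add		= dummy_add,
 *		.del		= dummy_del,
 *		.start		= dummy_start,
 *		.stop		= dummy_stop,
 *		.read		= dummy_read,
 *	};
 *
 * registered at init time with perf_pmu_register(&dummy_pmu, "dummy", -1).
 */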

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct inode		*inode;
	unsigned long		offset;
	unsigned long		size;
	unsigned int		range	: 1,
				filter	: 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	unsigned long			*addr_filters_offs;
	unsigned long			addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
#endif

	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	/* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t		*regs;
	struct perf_sample_data	*data;
	struct perf_event		*event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
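
/*
 * Illustrative sketch: counting CPU cycles on CPU 0 from kernel code
 * with the in-kernel counter API declared above:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	// ... let it count ...
 *	pr_info("cycles: %llu\n",
 *		perf_event_read_value(event, &enabled, &running));
 *	perf_event_release_kernel(event);
 */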

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)  |\
		     PERF_MEM_S(SNOOP, NA)|\
		     PERF_MEM_S(LOCK, NA) |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
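
/*
 * Illustrative sketch: a PMU's interrupt handler typically initializes
 * the sample data with the period that just expired and hands the
 * overflowed event (with the interrupt pt_regs) to the generic overflow
 * path, stopping the counter if asked to:
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 */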

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
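
/*
 * Illustrative sketch: callers fire software events at the point where
 * the event logically happens; the page fault path, for example, does:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */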

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_false perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
		return true;
	return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (perf_sw_migrate_enabled())
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (perf_sw_migrate_enabled() && task->sched_migrated) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
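
/*
 * Illustrative sketch of what an architecture's callchain walker does:
 * store a context marker first, then frame IPs until the helpers report
 * that the entry is full; next_caller_ip() stands in for a hypothetical
 * arch unwinder:
 *
 *	static void example_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *					     struct pt_regs *regs)
 *	{
 *		u64 ip = instruction_pointer(regs);
 *
 *		if (perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL))
 *			return;
 *		while (ip && !perf_callchain_store(entry, ip))
 *			ip = next_caller_ip(ip);
 *	}
 */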

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
					     void __user *buffer, size_t *lenp,
					     loff_t *ppos);

int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos);

static inline bool perf_paranoid_any(void)
{
	return sysctl_perf_event_paranoid > 2;
}

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
									{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
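
/*
 * Illustrative sketch: writing a record to the ring buffer follows the
 * begin/put/end pattern; the header type and payload layout here are
 * hypothetical, whatever PERF_RECORD_* the caller is emitting:
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.size = sizeof(header) + sizeof(u64),
 *	};
 *	u64 payload = 0;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, payload);
 *	perf_output_end(&handle);
 */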

struct perf_pmu_events_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute	attr;
	u64			id;
	const char		*event_str_ht;
	const char		*event_str_noht;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
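
/*
 * Illustrative sketch: a PMU driver typically describes its config
 * encoding and named events with the helpers above (values hypothetical)
 * and exposes the resulting attributes through pmu::attr_groups:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, evattr_cycles, "event=0x11");
 */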

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

#endif /* _LINUX_PERF_EVENT_H */