/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample;
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;

			struct event_constraint *constraint;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			struct task_struct	*tp_target;
			/* for tp_event->class */
			struct list_head	tp_list;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT	0x01

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01	/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02	/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04	/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};
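
/*
 * Illustrative sketch (simplified; the core's group scheduling works along
 * these lines): a group is added under one transaction so that a single
 * schedulability test covers all of its events.
 *
 *	pmu->start_txn(pmu);
 *
 *	if (pmu->add(leader, PERF_EF_START))
 *		goto error;
 *	list_for_each_entry(event, &leader->sibling_list, group_entry) {
 *		if (pmu->add(event, PERF_EF_START))
 *			goto error;
 *	}
 *
 *	if (!pmu->commit_txn(pmu))
 *		return 0;	// success: transaction closed
 * error:
 *	// ->del() every event that was ->add()ed, then:
 *	pmu->cancel_txn(pmu);
 */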

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup this event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	int				nr_branch_stack; /* branch_stack evt */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	struct list_head		rotation_list;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
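
/*
 * Illustrative sketch of the in-kernel counter API declared above; the
 * attr values and the overflow callback (my_overflow_cb) are hypothetical:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_cb, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */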

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)   |\
		     PERF_MEM_S(SNOOP, NA) |\
		     PERF_MEM_S(LOCK, NA)  |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}
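
/*
 * Illustrative sketch: a PMU's interrupt handler typically initializes the
 * sample data and hands it to perf_event_overflow(); a non-zero return
 * asks the handler to stop the event (cf. pmu::stop above):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 */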

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
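
/*
 * For example, the page fault path counts faults with:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static key above keeps this a NOP until at least one such
 * software event exists.
 */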

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
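
/*
 * Illustrative sketch: an architecture's perf_callchain_kernel() records
 * the trapped ip and then each return address found while unwinding the
 * stack (the unwind step below is pseudo-code, arch-specific):
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr = instruction_pointer(regs);
 *
 *		perf_callchain_store(entry, addr);
 *		while (unwind_next_frame(&addr))
 *			perf_callchain_store(entry, addr);
 *	}
 */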

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)
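
/*
 * Illustrative sketch (the callback name is hypothetical): a PMU driver can
 * hook CPU hotplug as below; perf_cpu_notifier() also replays UP_PREPARE,
 * STARTING and ONLINE for the CPU it runs on:
 *
 *	static int my_pmu_cpu_notify(struct notifier_block *nb,
 *				     unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// set up per-cpu PMU state for @cpu
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_cpu_notify);
 */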

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
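
/*
 * For example, a PMU driver can describe its config-field layout in sysfs
 * with:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_FORMAT_ATTR(umask, "config:8-15");
 *
 * and then list &format_attr_event.attr and &format_attr_umask.attr in one
 * of its pmu::attr_groups.
 */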

#endif /* _LINUX_PERF_EVENT_H */