/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};
/*
 * Generalized performance event IDs, used by the attr.config
 * parameter of the sys_perf_event_open() syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
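
/*
 * A hw-cache event is selected by packing the three IDs above into
 * attr.config -- cache-id in bits 0-7, op-id in bits 8-15 and result-id
 * in bits 16-23. A minimal sketch, counting L1 data-cache read misses:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */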

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various software
 * activities of the kernel (and allow profiling them):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX				= 1U << 11,	/* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX				= 1U << 4,	/* non-ABI */
};
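
/*
 * With PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING
 * user-space can scale the value of a time-multiplexed counter. A minimal
 * sketch (single event, no PERF_FORMAT_GROUP/ID; error handling omitted):
 *
 *	u64 buf[3];	// value, time_enabled, time_running
 *
 *	read(fd, buf, sizeof(buf));
 *	if (buf[2])
 *		count = buf[0] * buf[1] / buf[2];
 *
 * time_running < time_enabled means the event was multiplexed and the
 * scaled value is an estimate.
 */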

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */

				__reserved_1   : 46;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	__u64			bp_addr;
	__u64			bp_len;
};
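
/*
 * There is no glibc wrapper for the syscall, so user-space typically
 * opens an event via syscall(2). A minimal sketch, counting instructions
 * in the calling task (error handling omitted):
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.size     = sizeof(attr);
 *	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr,
 *		     0,		// pid:      the calling task
 *		     -1,	// cpu:      any CPU
 *		     -1,	// group_fd: no group
 *		     0);	// flags
 */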

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
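
/*
 * Typical user-space control flow around a measured region, using the
 * ioctls above on an fd returned by sys_perf_event_open() (sketch):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... code under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the
 * operation to all members of the event group instead.
 */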

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * On SMP-capable platforms, user-space should issue an rmb()
	 * after reading the @data_head value -- see perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by user-space to reflect the last read data. In this
	 * case the kernel will not over-write unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
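
/*
 * Sketch of a user-space ring-buffer consumer: the first page of the
 * mmap()ed area is the control page above, and the data follows on the
 * next pages. rmb() stands for whatever read barrier the platform
 * requires:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	u64 head = pc->data_head;
 *
 *	rmb();
 *	// ... consume records between pc->data_tail and head ...
 *	pc->data_tail = head;	// only meaningful with PROT_WRITE mappings
 */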

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
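
/*
 * Note that the cpumode is a value, not a flag, so consumers must mask
 * before comparing (sketch):
 *
 *	switch (header.misc & PERF_RECORD_MISC_CPUMODE_MASK) {
 *	case PERF_RECORD_MISC_KERNEL:	// ip is a kernel address
 *	case PERF_RECORD_MISC_USER:	// ip is a user address
 *	...
 *	}
 */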

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate user-space IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
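
/*
 * Records in the mmap data area are contiguous and self-describing via
 * header.size, so a consumer can walk them like this (sketch; assumes
 * 'data' and 'mask' describe the buffer, and ignores records that wrap
 * at the end of the buffer):
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr = data + (tail & mask);
 *
 *		switch (hdr->type) {
 *		case PERF_RECORD_SAMPLE:
 *			// ... parse per attr.sample_type, see above ...
 *			break;
 *		}
 *		tail += hdr->size;
 *	}
 */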

enum perf_callchain_context {
	PERF_CONTEXT_HV				= (__u64)-32,
	PERF_CONTEXT_KERNEL			= (__u64)-128,
	PERF_CONTEXT_USER			= (__u64)-512,

	PERF_CONTEXT_GUEST			= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL		= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER			= (__u64)-2560,

	PERF_CONTEXT_MAX			= (__u64)-4095,
};
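
/*
 * These markers are interleaved with real addresses in the ips[] array
 * of a PERF_SAMPLE_CALLCHAIN record to flag which context the entries
 * that follow belong to. Since they all live at the very top of the
 * address space, a consumer can test (sketch):
 *
 *	if (ips[i] >= PERF_CONTEXT_MAX)
 *		// context marker: following entries are from this context
 *	else
 *		// a real instruction pointer
 */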

#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest) (void);
	int (*is_user_mode) (void);
	unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01	/* the counter is stopped */
#define PERF_HES_UPTODATE	0x02	/* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add the
	 * group's events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */
};
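
/*
 * Sketch of how the core is expected to drive the transaction methods
 * when scheduling an event group (simplified pseudocode of the
 * group-scheduling path):
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (event->pmu->add(event, PERF_EF_START) != 0)
 *			goto rollback;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;			// group is now scheduled
 * rollback:
 *	for each event already added:
 *		event->pmu->del(event, 0);
 *	pmu->cancel_txn(pmu);
 */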

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;

#define PERF_BUFFER_WRITABLE		0x01

struct perf_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct perf_buffer		*buffer;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	enum perf_event_context_type	type;
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*buffer;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
	int				nmi;
	int				sample;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);

extern atomic_t perf_task_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
{
	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}

extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
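
/*
 * In-kernel users (e.g. the hw_breakpoint layer) create events without
 * a file descriptor. A minimal sketch, assuming my_overflow_handler is
 * a callback with the perf_overflow_handler_t signature:
 *
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu,
 *						 NULL,	// cpu-bound, no task
 *						 my_overflow_handler);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	// ...
 *	perf_event_release_kernel(event);
 */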

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
	return;

have_event:
	if (!regs) {
		perf_fetch_caller_regs(&hot_regs);
		regs = &hot_regs;
	}
	__perf_sw_event(event_id, nr, nmi, regs, addr);
}
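
/*
 * Kernel code emits software events through the helper above; e.g. the
 * page-fault path does something like (sketch):
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * The JUMP_LABEL test keeps the fast path (nearly) free when no such
 * event is active.
 */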

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
	      struct pt_regs *regs, u64 addr)				{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
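
/*
 * Usage sketch: a PMU driver registers its hotplug callback with
 *
 *	perf_cpu_notifier(my_pmu_notifier);
 *
 * which first replays CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE for the
 * current CPU and then registers the notifier for future hotplug
 * events (my_pmu_notifier being a hypothetical notifier_call-style
 * callback).
 */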

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */