/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
13 | #ifndef _LINUX_PERF_COUNTER_H | |
14 | #define _LINUX_PERF_COUNTER_H | |
15 | ||
16 | #include <asm/atomic.h> | |
17 | ||
18 | #include <linux/list.h> | |
19 | #include <linux/mutex.h> | |
20 | #include <linux/rculist.h> | |
21 | #include <linux/rcupdate.h> | |
22 | #include <linux/spinlock.h> | |
23 | ||
24 | struct task_struct; | |
25 | ||
/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well).
	 *
	 * Negative values keep the software IDs disjoint from the
	 * hardware event IDs above.
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE	= 0,
	PERF_RECORD_IRQ		= 1,
	PERF_RECORD_GROUP	= 2,
};
65 | ||
9f66a381 IM |
66 | /* |
67 | * Hardware event to monitor via a performance monitoring counter: | |
68 | */ | |
69 | struct perf_counter_hw_event { | |
70 | u64 type; | |
71 | ||
72 | u64 irq_period; | |
73 | u32 record_type; | |
74 | ||
75 | u32 disabled : 1, /* off by default */ | |
76 | nmi : 1, /* NMI sampling */ | |
77 | raw : 1, /* raw event type */ | |
78 | __reserved_1 : 29; | |
79 | ||
80 | u64 __reserved_2; | |
eab656ae TG |
81 | }; |
82 | ||
9f66a381 IM |
83 | /* |
84 | * Kernel-internal data types: | |
85 | */ | |
86 | ||
0793a61d | 87 | /** |
9f66a381 | 88 | * struct hw_perf_counter - performance counter hardware details: |
0793a61d TG |
89 | */ |
90 | struct hw_perf_counter { | |
9f66a381 IM |
91 | u64 config; |
92 | unsigned long config_base; | |
93 | unsigned long counter_base; | |
94 | int nmi; | |
95 | unsigned int idx; | |
96 | u64 prev_count; | |
97 | u64 irq_period; | |
98 | s32 next_count; | |
0793a61d TG |
99 | }; |

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048
0793a61d TG |
105 | |
106 | /** | |
107 | * struct perf_data - performance counter IRQ data sampling ... | |
108 | */ | |
109 | struct perf_data { | |
9f66a381 IM |
110 | int len; |
111 | int rd_idx; | |
112 | int overrun; | |
113 | u8 data[PERF_DATA_BUFLEN]; | |
0793a61d TG |
114 | }; |
115 | ||
116 | /** | |
117 | * struct perf_counter - performance counter kernel representation: | |
118 | */ | |
119 | struct perf_counter { | |
04289bb9 IM |
120 | struct list_head list_entry; |
121 | struct list_head sibling_list; | |
122 | struct perf_counter *group_leader; | |
123 | ||
0793a61d TG |
124 | int active; |
125 | #if BITS_PER_LONG == 64 | |
126 | atomic64_t count; | |
127 | #else | |
128 | atomic_t count32[2]; | |
129 | #endif | |
9f66a381 | 130 | struct perf_counter_hw_event hw_event; |
0793a61d TG |
131 | struct hw_perf_counter hw; |
132 | ||
133 | struct perf_counter_context *ctx; | |
134 | struct task_struct *task; | |
135 | ||
136 | /* | |
137 | * Protect attach/detach: | |
138 | */ | |
139 | struct mutex mutex; | |
140 | ||
141 | int oncpu; | |
142 | int cpu; | |
143 | ||
0793a61d TG |
144 | /* read() / irq related data */ |
145 | wait_queue_head_t waitq; | |
146 | /* optional: for NMIs */ | |
147 | int wakeup_pending; | |
148 | struct perf_data *irqdata; | |
149 | struct perf_data *usrdata; | |
150 | struct perf_data data[2]; | |
151 | }; | |

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};
171 | ||
172 | /** | |
173 | * struct perf_counter_cpu_context - per cpu counter context structure | |
174 | */ | |
175 | struct perf_cpu_context { | |
176 | struct perf_counter_context ctx; | |
177 | struct perf_counter_context *task_ctx; | |
178 | int active_oncpu; | |
179 | int max_pertask; | |
180 | }; | |
181 | ||
182 | /* | |
183 | * Set by architecture code: | |
184 | */ | |
185 | extern int perf_max_counters; | |
186 | ||
187 | #ifdef CONFIG_PERF_COUNTERS | |
188 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | |
189 | extern void perf_counter_task_sched_out(struct task_struct *task, int cpu); | |
190 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | |
191 | extern void perf_counter_init_task(struct task_struct *task); | |
192 | extern void perf_counter_notify(struct pt_regs *regs); | |
193 | extern void perf_counter_print_debug(void); | |
4ac13294 TG |
194 | extern void hw_perf_restore_ctrl(u64 ctrl); |
195 | extern u64 hw_perf_disable_all(void); | |
0793a61d TG |
196 | #else |
197 | static inline void | |
198 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | |
199 | static inline void | |
200 | perf_counter_task_sched_out(struct task_struct *task, int cpu) { } | |
201 | static inline void | |
202 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | |
203 | static inline void perf_counter_init_task(struct task_struct *task) { } | |
204 | static inline void perf_counter_notify(struct pt_regs *regs) { } | |
205 | static inline void perf_counter_print_debug(void) { } | |
4ac13294 TG |
206 | static inline void hw_perf_restore_ctrl(u64 ctrl) { } |
207 | static inline u64 hw_perf_disable_all(void) { return 0; } | |
0793a61d TG |
208 | #endif |
209 | ||
210 | #endif /* _LINUX_PERF_COUNTER_H */ |