]>
Commit | Line | Data |
---|---|---|
087bfbb0 YZ |
1 | #include <linux/module.h> |
2 | #include <linux/slab.h> | |
14371cce | 3 | #include <linux/pci.h> |
087bfbb0 YZ |
4 | #include <linux/perf_event.h> |
5 | #include "perf_event.h" | |
6 | ||
#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
/* the fixed counter sits right after the generic ones */
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)
15 | ||
/*
 * Pack/unpack a (type, idx) pair into the pci_device_id driver_data word.
 * Arguments are fully parenthesized so compound expressions (e.g. "a | b")
 * expand correctly regardless of operator precedence.
 */
#define UNCORE_PCI_DEV_DATA(type, idx)	(((type) << 8) | (idx))
#define UNCORE_PCI_DEV_TYPE(data)	(((data) >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	((data) & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX		8
24 | ||
/* uncore constraints cover all counters (0xff counter mask) */
#define UNCORE_EVENT_CONSTRAINT(c, n)	EVENT_CONSTRAINT(c, n, 0xff)

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
31 | ||
32 | struct intel_uncore_type { | |
33 | const char *name; | |
34 | int num_counters; | |
35 | int num_boxes; | |
36 | int perf_ctr_bits; | |
37 | int fixed_ctr_bits; | |
087bfbb0 YZ |
38 | unsigned perf_ctr; |
39 | unsigned event_ctl; | |
40 | unsigned event_mask; | |
41 | unsigned fixed_ctr; | |
42 | unsigned fixed_ctl; | |
43 | unsigned box_ctl; | |
44 | unsigned msr_offset; | |
6a67943a YZ |
45 | unsigned num_shared_regs:8; |
46 | unsigned single_fixed:1; | |
254298c7 | 47 | unsigned pair_ctr_ctl:1; |
cb37af77 | 48 | unsigned *msr_offsets; |
087bfbb0 YZ |
49 | struct event_constraint unconstrainted; |
50 | struct event_constraint *constraints; | |
51 | struct intel_uncore_pmu *pmus; | |
52 | struct intel_uncore_ops *ops; | |
53 | struct uncore_event_desc *event_descs; | |
314d9f63 | 54 | const struct attribute_group *attr_groups[4]; |
d64b25b6 | 55 | struct pmu *pmu; /* for custom pmu ops */ |
087bfbb0 YZ |
56 | }; |
57 | ||
/* named aliases into intel_uncore_type::attr_groups[] */
#define pmu_group	attr_groups[0]
#define format_group	attr_groups[1]
#define events_group	attr_groups[2]
087bfbb0 YZ |
61 | |
62 | struct intel_uncore_ops { | |
63 | void (*init_box)(struct intel_uncore_box *); | |
64 | void (*disable_box)(struct intel_uncore_box *); | |
65 | void (*enable_box)(struct intel_uncore_box *); | |
66 | void (*disable_event)(struct intel_uncore_box *, struct perf_event *); | |
67 | void (*enable_event)(struct intel_uncore_box *, struct perf_event *); | |
68 | u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *); | |
6a67943a YZ |
69 | int (*hw_config)(struct intel_uncore_box *, struct perf_event *); |
70 | struct event_constraint *(*get_constraint)(struct intel_uncore_box *, | |
71 | struct perf_event *); | |
72 | void (*put_constraint)(struct intel_uncore_box *, struct perf_event *); | |
087bfbb0 YZ |
73 | }; |
74 | ||
75 | struct intel_uncore_pmu { | |
76 | struct pmu pmu; | |
77 | char name[UNCORE_PMU_NAME_LEN]; | |
78 | int pmu_idx; | |
79 | int func_id; | |
80 | struct intel_uncore_type *type; | |
81 | struct intel_uncore_box ** __percpu box; | |
14371cce | 82 | struct list_head box_list; |
087bfbb0 YZ |
83 | }; |
84 | ||
6a67943a YZ |
85 | struct intel_uncore_extra_reg { |
86 | raw_spinlock_t lock; | |
254298c7 | 87 | u64 config, config1, config2; |
6a67943a YZ |
88 | atomic_t ref; |
89 | }; | |
90 | ||
087bfbb0 YZ |
91 | struct intel_uncore_box { |
92 | int phys_id; | |
93 | int n_active; /* number of active events */ | |
94 | int n_events; | |
95 | int cpu; /* cpu to collect events */ | |
96 | unsigned long flags; | |
97 | atomic_t refcnt; | |
98 | struct perf_event *events[UNCORE_PMC_IDX_MAX]; | |
99 | struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; | |
b371b594 | 100 | struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; |
087bfbb0 YZ |
101 | unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; |
102 | u64 tags[UNCORE_PMC_IDX_MAX]; | |
14371cce | 103 | struct pci_dev *pci_dev; |
087bfbb0 | 104 | struct intel_uncore_pmu *pmu; |
79859cce | 105 | u64 hrtimer_duration; /* hrtimer timeout for this box */ |
087bfbb0 YZ |
106 | struct hrtimer hrtimer; |
107 | struct list_head list; | |
ced2efb0 | 108 | struct list_head active_list; |
b9e1ab6d | 109 | void *io_addr; |
6a67943a | 110 | struct intel_uncore_extra_reg shared_regs[0]; |
087bfbb0 YZ |
111 | }; |
112 | ||
113 | #define UNCORE_BOX_FLAG_INITIATED 0 | |
114 | ||
115 | struct uncore_event_desc { | |
116 | struct kobj_attribute attr; | |
117 | const char *config; | |
118 | }; | |
119 | ||
712df65c TI |
120 | struct pci2phy_map { |
121 | struct list_head list; | |
122 | int segment; | |
123 | int pbus_to_physid[256]; | |
124 | }; | |
125 | ||
126 | int uncore_pcibus_to_physid(struct pci_bus *bus); | |
127 | struct pci2phy_map *__find_pci2phy_map(int segment); | |
128 | ||
514b2346 YZ |
129 | ssize_t uncore_event_show(struct kobject *kobj, |
130 | struct kobj_attribute *attr, char *buf); | |
131 | ||
/* Initializer for a struct uncore_event_desc table entry. */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)				\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}
137 | ||
/*
 * Define a sysfs format attribute whose show() just prints a fixed
 * format string; the BUILD_BUG_ON guards against PAGE_SIZE overflow.
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				      struct kobj_attribute *attr,	\
				      char *page)			\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
148 | ||
14371cce YZ |
149 | static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) |
150 | { | |
151 | return box->pmu->type->box_ctl; | |
152 | } | |
153 | ||
154 | static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box) | |
155 | { | |
156 | return box->pmu->type->fixed_ctl; | |
157 | } | |
158 | ||
159 | static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box) | |
160 | { | |
161 | return box->pmu->type->fixed_ctr; | |
162 | } | |
163 | ||
164 | static inline | |
165 | unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx) | |
166 | { | |
167 | return idx * 4 + box->pmu->type->event_ctl; | |
168 | } | |
169 | ||
170 | static inline | |
171 | unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) | |
172 | { | |
173 | return idx * 8 + box->pmu->type->perf_ctr; | |
174 | } | |
175 | ||
cb37af77 YZ |
176 | static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) |
177 | { | |
178 | struct intel_uncore_pmu *pmu = box->pmu; | |
179 | return pmu->type->msr_offsets ? | |
180 | pmu->type->msr_offsets[pmu->pmu_idx] : | |
181 | pmu->type->msr_offset * pmu->pmu_idx; | |
182 | } | |
183 | ||
184 | static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) | |
087bfbb0 YZ |
185 | { |
186 | if (!box->pmu->type->box_ctl) | |
187 | return 0; | |
cb37af77 | 188 | return box->pmu->type->box_ctl + uncore_msr_box_offset(box); |
087bfbb0 YZ |
189 | } |
190 | ||
cb37af77 | 191 | static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) |
087bfbb0 YZ |
192 | { |
193 | if (!box->pmu->type->fixed_ctl) | |
194 | return 0; | |
cb37af77 | 195 | return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); |
087bfbb0 YZ |
196 | } |
197 | ||
cb37af77 | 198 | static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) |
087bfbb0 | 199 | { |
cb37af77 | 200 | return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); |
087bfbb0 YZ |
201 | } |
202 | ||
203 | static inline | |
204 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) | |
205 | { | |
254298c7 YZ |
206 | return box->pmu->type->event_ctl + |
207 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | |
cb37af77 | 208 | uncore_msr_box_offset(box); |
087bfbb0 YZ |
209 | } |
210 | ||
211 | static inline | |
212 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) | |
213 | { | |
254298c7 YZ |
214 | return box->pmu->type->perf_ctr + |
215 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | |
cb37af77 | 216 | uncore_msr_box_offset(box); |
087bfbb0 YZ |
217 | } |
218 | ||
14371cce YZ |
219 | static inline |
220 | unsigned uncore_fixed_ctl(struct intel_uncore_box *box) | |
221 | { | |
222 | if (box->pci_dev) | |
223 | return uncore_pci_fixed_ctl(box); | |
224 | else | |
225 | return uncore_msr_fixed_ctl(box); | |
226 | } | |
227 | ||
228 | static inline | |
229 | unsigned uncore_fixed_ctr(struct intel_uncore_box *box) | |
230 | { | |
231 | if (box->pci_dev) | |
232 | return uncore_pci_fixed_ctr(box); | |
233 | else | |
234 | return uncore_msr_fixed_ctr(box); | |
235 | } | |
236 | ||
237 | static inline | |
238 | unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx) | |
239 | { | |
240 | if (box->pci_dev) | |
241 | return uncore_pci_event_ctl(box, idx); | |
242 | else | |
243 | return uncore_msr_event_ctl(box, idx); | |
244 | } | |
245 | ||
246 | static inline | |
247 | unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx) | |
248 | { | |
249 | if (box->pci_dev) | |
250 | return uncore_pci_perf_ctr(box, idx); | |
251 | else | |
252 | return uncore_msr_perf_ctr(box, idx); | |
253 | } | |
254 | ||
087bfbb0 YZ |
255 | static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box) |
256 | { | |
257 | return box->pmu->type->perf_ctr_bits; | |
258 | } | |
259 | ||
260 | static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box) | |
261 | { | |
262 | return box->pmu->type->fixed_ctr_bits; | |
263 | } | |
264 | ||
265 | static inline int uncore_num_counters(struct intel_uncore_box *box) | |
266 | { | |
267 | return box->pmu->type->num_counters; | |
268 | } | |
269 | ||
270 | static inline void uncore_disable_box(struct intel_uncore_box *box) | |
271 | { | |
272 | if (box->pmu->type->ops->disable_box) | |
273 | box->pmu->type->ops->disable_box(box); | |
274 | } | |
275 | ||
276 | static inline void uncore_enable_box(struct intel_uncore_box *box) | |
277 | { | |
278 | if (box->pmu->type->ops->enable_box) | |
279 | box->pmu->type->ops->enable_box(box); | |
280 | } | |
281 | ||
282 | static inline void uncore_disable_event(struct intel_uncore_box *box, | |
283 | struct perf_event *event) | |
284 | { | |
285 | box->pmu->type->ops->disable_event(box, event); | |
286 | } | |
287 | ||
288 | static inline void uncore_enable_event(struct intel_uncore_box *box, | |
289 | struct perf_event *event) | |
290 | { | |
291 | box->pmu->type->ops->enable_event(box, event); | |
292 | } | |
293 | ||
294 | static inline u64 uncore_read_counter(struct intel_uncore_box *box, | |
295 | struct perf_event *event) | |
296 | { | |
297 | return box->pmu->type->ops->read_counter(box, event); | |
298 | } | |
299 | ||
15c12479 IM |
300 | static inline void uncore_box_init(struct intel_uncore_box *box) |
301 | { | |
302 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { | |
303 | if (box->pmu->type->ops->init_box) | |
304 | box->pmu->type->ops->init_box(box); | |
305 | } | |
306 | } | |
307 | ||
254298c7 YZ |
308 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) |
309 | { | |
310 | return (box->phys_id < 0); | |
311 | } | |
514b2346 YZ |
312 | |
313 | struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event); | |
314 | struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu); | |
315 | struct intel_uncore_box *uncore_event_to_box(struct perf_event *event); | |
316 | u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event); | |
317 | void uncore_pmu_start_hrtimer(struct intel_uncore_box *box); | |
318 | void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box); | |
319 | void uncore_pmu_event_read(struct perf_event *event); | |
320 | void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event); | |
321 | struct event_constraint * | |
322 | uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event); | |
323 | void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event); | |
324 | u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx); | |
325 | ||
326 | extern struct intel_uncore_type **uncore_msr_uncores; | |
327 | extern struct intel_uncore_type **uncore_pci_uncores; | |
328 | extern struct pci_driver *uncore_pci_driver; | |
712df65c TI |
329 | extern raw_spinlock_t pci2phy_map_lock; |
330 | extern struct list_head pci2phy_map_head; | |
514b2346 YZ |
331 | extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; |
332 | extern struct event_constraint uncore_constraint_empty; | |
/* perf_event_intel_uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);