#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
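/*
 * Most uncore PMUs have no usable overflow interrupt, so active
 * counters are re-read from a periodic hrtimer; these are the
 * default polling intervals.
 */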
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

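/*
 * Pack an uncore type index and a box index into a single value
 * (e.g. the driver_data of a PCI id table entry); the _TYPE/_IDX
 * macros unpack it again.
 */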
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	3

/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX		8

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

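/*
 * Describes one kind of uncore box: number and width of its counters,
 * the MSR/PCI register layout, and the ops used to program it.
 */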
struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

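/* Callbacks a platform-specific uncore implementation provides. */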
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						    struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	struct intel_uncore_type *type;
	struct intel_uncore_box ** __percpu box;
	struct list_head box_list;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

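/*
 * Per-box runtime state: the events scheduled on this box, the
 * counters they occupy, and the hrtimer used to sync the counts.
 */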
struct intel_uncore_box {
	int phys_id;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[0];
};

#define UNCORE_BOX_FLAG_INITIATED	0

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

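/* Per-segment mapping from PCI bus number to physical package id. */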
struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

int uncore_pcibus_to_physid(struct pci_bus *bus);
struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

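/*
 * Emits a read-only sysfs attribute whose show() simply prints the
 * given format string (e.g. "config:0-7").
 */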
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

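/*
 * MSR-based boxes: a box's registers live at a per-box offset, taken
 * either from the msr_offsets[] table or computed as pmu_idx * msr_offset.
 */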
static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

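/*
 * Generic accessors that dispatch to the PCI or MSR variants,
 * depending on how the box is accessed.
 */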
static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

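/*
 * A box with a negative phys_id is a temporary "fake" box, used e.g.
 * while validating event groups; it is not backed by real hardware.
 */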
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->phys_id < 0);
}

struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event);
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
struct intel_uncore_box *uncore_event_to_box(struct perf_event *event);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_dev *uncore_extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
extern struct event_constraint uncore_constraint_empty;

/* perf_event_intel_uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* perf_event_intel_uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);

/* perf_event_intel_uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);