]>
Commit | Line | Data |
---|---|---|
03089688 | 1 | /* |
4b47e573 | 2 | * ARMv8 PMUv3 Performance Events handling code. |
03089688 WD |
3 | * |
4 | * Copyright (C) 2012 ARM Limited | |
5 | * Author: Will Deacon <will.deacon@arm.com> | |
6 | * | |
7 | * This code is based heavily on the ARMv7 perf event code. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
03089688 | 21 | |
03089688 | 22 | #include <asm/irq_regs.h> |
b8cfadfc | 23 | #include <asm/perf_event.h> |
bf2d4782 | 24 | #include <asm/sysreg.h> |
d98ecdac | 25 | #include <asm/virt.h> |
03089688 | 26 | |
dbee3a74 | 27 | #include <linux/acpi.h> |
9d2dcc8f | 28 | #include <linux/clocksource.h> |
d1947bc4 | 29 | #include <linux/kvm_host.h> |
6475b2d8 MR |
30 | #include <linux/of.h> |
31 | #include <linux/perf/arm_pmu.h> | |
32 | #include <linux/platform_device.h> | |
03089688 | 33 | |
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/*
 * ARMv8 Cavium ThunderX specific event types.
 * These live in the IMPLEMENTATION DEFINED event number space (0x40-0xBF
 * and above per PMUv3); values here are ThunderX-specific encodings.
 */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED
62a4dda9 | 43 | |
236b9b91 JL |
/*
 * ARMv8 Architectural defined events, not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
/* Map generic perf hardware event ids onto PMUv3 common event numbers. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
61 | ||
/* Generic cache map using only architecturally-defined PMUv3 events. */
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
82 | ||
ac82d127 MR |
/* Cortex-A53: extends the generic cache map with IMPDEF events. */
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
93 | ||
62a4dda9 MR |
/* Cortex-A57/A72: per-direction (RD/WR) IMPDEF cache and TLB events. */
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
110 | ||
5561b6c5 JT |
/* Cortex-A73: only L1D access events have per-direction IMPDEF encodings. */
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
119 | ||
d0aa2bff JG |
/* Cavium ThunderX: adds prefetch events on top of the IMPDEF RD/WR set. */
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
140 | ||
201a72b2 AK |
/* Broadcom Vulcan: IMPDEF per-direction cache, TLB and bus events. */
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
4b1a9e69 AK |
159 | |
160 | static ssize_t | |
161 | armv8pmu_events_sysfs_show(struct device *dev, | |
162 | struct device_attribute *attr, char *page) | |
163 | { | |
164 | struct perf_pmu_events_attr *pmu_attr; | |
165 | ||
166 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); | |
167 | ||
168 | return sprintf(page, "event=0x%03llx\n", pmu_attr->id); | |
169 | } | |
170 | ||
9e9caa6a DR |
#define ARMV8_EVENT_ATTR_RESOLVE(m) #m	/* NOTE(review): appears unused in this chunk */
/* Declare a device attribute named armv8_event_attr_<name> for one event. */
#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
		       config, armv8pmu_events_sysfs_show)

/*
 * One attribute per architecturally-defined event.  Visibility of each
 * entry is filtered at runtime by armv8pmu_event_attr_is_visible()
 * against the PMCEID bitmaps.
 */
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
9e9caa6a DR |
237 | |
/*
 * NULL-terminated attribute list backing the "events" sysfs group.
 * Must stay in sync with the ARMV8_EVENT_ATTR() declarations above.
 */
static struct attribute *armv8_pmuv3_event_attrs[] = {
	&armv8_event_attr_sw_incr.attr.attr,
	&armv8_event_attr_l1i_cache_refill.attr.attr,
	&armv8_event_attr_l1i_tlb_refill.attr.attr,
	&armv8_event_attr_l1d_cache_refill.attr.attr,
	&armv8_event_attr_l1d_cache.attr.attr,
	&armv8_event_attr_l1d_tlb_refill.attr.attr,
	&armv8_event_attr_ld_retired.attr.attr,
	&armv8_event_attr_st_retired.attr.attr,
	&armv8_event_attr_inst_retired.attr.attr,
	&armv8_event_attr_exc_taken.attr.attr,
	&armv8_event_attr_exc_return.attr.attr,
	&armv8_event_attr_cid_write_retired.attr.attr,
	&armv8_event_attr_pc_write_retired.attr.attr,
	&armv8_event_attr_br_immed_retired.attr.attr,
	&armv8_event_attr_br_return_retired.attr.attr,
	&armv8_event_attr_unaligned_ldst_retired.attr.attr,
	&armv8_event_attr_br_mis_pred.attr.attr,
	&armv8_event_attr_cpu_cycles.attr.attr,
	&armv8_event_attr_br_pred.attr.attr,
	&armv8_event_attr_mem_access.attr.attr,
	&armv8_event_attr_l1i_cache.attr.attr,
	&armv8_event_attr_l1d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_cache.attr.attr,
	&armv8_event_attr_l2d_cache_refill.attr.attr,
	&armv8_event_attr_l2d_cache_wb.attr.attr,
	&armv8_event_attr_bus_access.attr.attr,
	&armv8_event_attr_memory_error.attr.attr,
	&armv8_event_attr_inst_spec.attr.attr,
	&armv8_event_attr_ttbr_write_retired.attr.attr,
	&armv8_event_attr_bus_cycles.attr.attr,
	&armv8_event_attr_l1d_cache_allocate.attr.attr,
	&armv8_event_attr_l2d_cache_allocate.attr.attr,
	&armv8_event_attr_br_retired.attr.attr,
	&armv8_event_attr_br_mis_pred_retired.attr.attr,
	&armv8_event_attr_stall_frontend.attr.attr,
	&armv8_event_attr_stall_backend.attr.attr,
	&armv8_event_attr_l1d_tlb.attr.attr,
	&armv8_event_attr_l1i_tlb.attr.attr,
	&armv8_event_attr_l2i_cache.attr.attr,
	&armv8_event_attr_l2i_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache_allocate.attr.attr,
	&armv8_event_attr_l3d_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache.attr.attr,
	&armv8_event_attr_l3d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_tlb_refill.attr.attr,
	&armv8_event_attr_l2i_tlb_refill.attr.attr,
	&armv8_event_attr_l2d_tlb.attr.attr,
	&armv8_event_attr_l2i_tlb.attr.attr,
	&armv8_event_attr_remote_access.attr.attr,
	&armv8_event_attr_ll_cache.attr.attr,
	&armv8_event_attr_ll_cache_miss.attr.attr,
	&armv8_event_attr_dtlb_walk.attr.attr,
	&armv8_event_attr_itlb_walk.attr.attr,
	&armv8_event_attr_ll_cache_rd.attr.attr,
	&armv8_event_attr_ll_cache_miss_rd.attr.attr,
	&armv8_event_attr_remote_access_rd.attr.attr,
	&armv8_event_attr_sample_pop.attr.attr,
	&armv8_event_attr_sample_feed.attr.attr,
	&armv8_event_attr_sample_filtrate.attr.attr,
	&armv8_event_attr_sample_collision.attr.attr,
	NULL,
};
301 | ||
4b1a9e69 AK |
302 | static umode_t |
303 | armv8pmu_event_attr_is_visible(struct kobject *kobj, | |
304 | struct attribute *attr, int unused) | |
305 | { | |
306 | struct device *dev = kobj_to_dev(kobj); | |
307 | struct pmu *pmu = dev_get_drvdata(dev); | |
308 | struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu); | |
309 | struct perf_pmu_events_attr *pmu_attr; | |
310 | ||
311 | pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); | |
312 | ||
342e53bd WD |
313 | if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && |
314 | test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap)) | |
315 | return attr->mode; | |
316 | ||
317 | pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE; | |
318 | if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS && | |
319 | test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap)) | |
4b1a9e69 AK |
320 | return attr->mode; |
321 | ||
322 | return 0; | |
323 | } | |
324 | ||
9e9caa6a DR |
/* "events" sysfs group; entries filtered by the PMCEID bitmaps. */
static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

/* Event number lives in config[15:0]; config1[0] requests a 64-bit counter. */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

/* True if the user asked for a 64-bit counter via the "long" format bit. */
static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
349 | ||
03089688 WD |
/*
 * Perf Events' indices
 *
 * Index 0 is reserved for the dedicated cycle counter; the event
 * counters follow from index 1 up to num_events - 1 inclusive.
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
03089688 | 357 | |
c1320790 SP |
/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	/* WARN (and return false) if called before a counter was allocated. */
	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
372 | ||
03089688 WD |
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

/* Read the PMU control register (PMCR_EL0). */
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}
387 | ||
/* Write PMCR_EL0, masking out reserved bits first. */
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	/* Barrier before the write so prior PMU accesses are not reordered past it. */
	isb();
	write_sysreg(val, pmcr_el0);
}
394 | ||
/* Non-zero if any counter's overflow bit is set in the PMOVS value. */
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
399 | ||
6475b2d8 | 400 | static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
03089688 | 401 | { |
6475b2d8 MR |
402 | return idx >= ARMV8_IDX_CYCLE_COUNTER && |
403 | idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu); | |
03089688 WD |
404 | } |
405 | ||
/* Test the overflow bit for counter @idx within the PMOVS snapshot @pmnc. */
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
410 | ||
0c55d19c | 411 | static inline void armv8pmu_select_counter(int idx) |
03089688 | 412 | { |
6475b2d8 | 413 | u32 counter = ARMV8_IDX_TO_COUNTER(idx); |
bf2d4782 | 414 | write_sysreg(counter, pmselr_el0); |
03089688 | 415 | isb(); |
0c55d19c | 416 | } |
03089688 | 417 | |
0c55d19c SP |
/* Read the 32-bit value of event counter @idx via the PMSELR/PMXEVCNTR pair. */
static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}
423 | ||
c1320790 SP |
/*
 * Read an event counter, combining two adjacent 32-bit counters
 * (high word in @idx, low word in @idx - 1) when the event is chained.
 */
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}
434 | ||
3d659e7d | 435 | static u64 armv8pmu_read_counter(struct perf_event *event) |
03089688 | 436 | { |
6475b2d8 MR |
437 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
438 | struct hw_perf_event *hwc = &event->hw; | |
439 | int idx = hwc->idx; | |
c1320790 | 440 | u64 value = 0; |
03089688 | 441 | |
6475b2d8 | 442 | if (!armv8pmu_counter_valid(cpu_pmu, idx)) |
03089688 WD |
443 | pr_err("CPU%u reading wrong counter %d\n", |
444 | smp_processor_id(), idx); | |
445 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) | |
bf2d4782 | 446 | value = read_sysreg(pmccntr_el0); |
0c55d19c | 447 | else |
c1320790 | 448 | value = armv8pmu_read_hw_counter(event); |
03089688 WD |
449 | |
450 | return value; | |
451 | } | |
452 | ||
0c55d19c SP |
/* Write the 32-bit value of event counter @idx via the PMSELR/PMXEVCNTR pair. */
static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}
458 | ||
c1320790 SP |
/*
 * Write an event counter: for chained events the upper half goes to @idx
 * and the lower half to @idx - 1, otherwise @value is written as-is.
 */
static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}
471 | ||
3d659e7d | 472 | static void armv8pmu_write_counter(struct perf_event *event, u64 value) |
03089688 | 473 | { |
6475b2d8 MR |
474 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
475 | struct hw_perf_event *hwc = &event->hw; | |
476 | int idx = hwc->idx; | |
477 | ||
478 | if (!armv8pmu_counter_valid(cpu_pmu, idx)) | |
03089688 WD |
479 | pr_err("CPU%u writing wrong counter %d\n", |
480 | smp_processor_id(), idx); | |
7175f059 JG |
481 | else if (idx == ARMV8_IDX_CYCLE_COUNTER) { |
482 | /* | |
c1320790 SP |
483 | * The cycles counter is really a 64-bit counter. |
484 | * When treating it as a 32-bit counter, we only count | |
485 | * the lower 32 bits, and set the upper 32-bits so that | |
486 | * we get an interrupt upon 32-bit overflow. | |
7175f059 | 487 | */ |
c1320790 SP |
488 | if (!armv8pmu_event_is_64bit(event)) |
489 | value |= 0xffffffff00000000ULL; | |
3a95200d | 490 | write_sysreg(value, pmccntr_el0); |
0c55d19c | 491 | } else |
c1320790 | 492 | armv8pmu_write_hw_counter(event, value); |
03089688 WD |
493 | } |
494 | ||
/* Program event type/filter bits for counter @idx via PMXEVTYPER_EL0. */
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}
501 | ||
c1320790 SP |
/*
 * Program the event type for @event's counter(s).
 */
static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
522 | ||
03089688 WD |
/* Enable counter @idx via its PMCNTENSET_EL0 bit; returns @idx. */
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}
529 | ||
c1320790 SP |
/*
 * Enable the counter(s) backing @event, informing KVM of the counter
 * bits so guest-attributed events can be handled by the hyp switch code.
 */
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int idx = event->hw.idx;
	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

	if (armv8pmu_event_is_chained(event))
		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

	kvm_set_pmu_events(counter_bits, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr)) {
		armv8pmu_enable_counter(idx);
		if (armv8pmu_event_is_chained(event))
			armv8pmu_enable_counter(idx - 1);
	}
}
548 | ||
03089688 WD |
/* Disable counter @idx via its PMCNTENCLR_EL0 bit; returns @idx. */
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}
555 | ||
c1320790 SP |
/*
 * Disable the counter(s) backing @event and clear the corresponding
 * bits from KVM's guest-event tracking.  Note the high (CHAIN) counter
 * is disabled before the low one, the reverse of the enable order.
 */
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event_attr *attr = &event->attr;
	int idx = hwc->idx;
	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));

	if (armv8pmu_event_is_chained(event))
		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));

	kvm_clr_pmu_events(counter_bits);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr)) {
		if (armv8pmu_event_is_chained(event))
			armv8pmu_disable_counter(idx - 1);
		armv8pmu_disable_counter(idx);
	}
}
575 | ||
03089688 WD |
/* Enable the overflow interrupt for counter @idx via PMINTENSET_EL1. */
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}
582 | ||
c1320790 SP |
/*
 * Enable the overflow interrupt for @event.  Only the low counter of a
 * chained pair needs it; the high counter overflows via the CHAIN event.
 */
static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
	return armv8pmu_enable_intens(event->hw.idx);
}
587 | ||
03089688 WD |
/*
 * Disable the overflow interrupt for counter @idx and clear any pending
 * overflow flag for it; the isb()s order the sysreg writes.
 */
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}
599 | ||
c1320790 SP |
/* Disable the overflow interrupt for @event's counter. */
static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}
604 | ||
03089688 WD |
/*
 * Atomically snapshot and clear the overflow status flags (PMOVSCLR_EL0),
 * returning the masked snapshot for the IRQ handler to process.
 */
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
618 | ||
6475b2d8 | 619 | static void armv8pmu_enable_event(struct perf_event *event) |
03089688 WD |
620 | { |
621 | unsigned long flags; | |
6475b2d8 MR |
622 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
623 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); | |
03089688 WD |
624 | |
625 | /* | |
626 | * Enable counter and interrupt, and set the counter to count | |
627 | * the event that we're interested in. | |
628 | */ | |
629 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
630 | ||
631 | /* | |
632 | * Disable counter | |
633 | */ | |
c1320790 | 634 | armv8pmu_disable_event_counter(event); |
03089688 WD |
635 | |
636 | /* | |
637 | * Set event (if destined for PMNx counters). | |
638 | */ | |
c1320790 | 639 | armv8pmu_write_event_type(event); |
03089688 WD |
640 | |
641 | /* | |
642 | * Enable interrupt for this counter | |
643 | */ | |
c1320790 | 644 | armv8pmu_enable_event_irq(event); |
03089688 WD |
645 | |
646 | /* | |
647 | * Enable counter | |
648 | */ | |
c1320790 | 649 | armv8pmu_enable_event_counter(event); |
03089688 WD |
650 | |
651 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
652 | } | |
653 | ||
6475b2d8 | 654 | static void armv8pmu_disable_event(struct perf_event *event) |
03089688 WD |
655 | { |
656 | unsigned long flags; | |
6475b2d8 MR |
657 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
658 | struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); | |
03089688 WD |
659 | |
660 | /* | |
661 | * Disable counter and interrupt | |
662 | */ | |
663 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | |
664 | ||
665 | /* | |
666 | * Disable counter | |
667 | */ | |
c1320790 | 668 | armv8pmu_disable_event_counter(event); |
03089688 WD |
669 | |
670 | /* | |
671 | * Disable interrupt for this counter | |
672 | */ | |
c1320790 | 673 | armv8pmu_disable_event_irq(event); |
03089688 WD |
674 | |
675 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | |
676 | } | |
677 | ||
3cce50df SP |
/* Globally enable the PMU by setting PMCR_EL0.E under the pmu_lock. */
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
688 | ||
/* Globally stop this CPU's PMU by clearing the PMCR enable bit. */
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
699 | ||
/*
 * PMU overflow interrupt handler. Reads and clears the overflow status
 * register, then for each overflowed counter folds the hardware delta
 * into its perf event, re-arms the sampling period, and pushes a sample
 * to the perf core.
 */
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		/* Fold the hardware count into the event's software total. */
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Re-arm the period; skip the sample if it could not be set. */
		if (!armpmu_event_set_period(event))
			continue;

		/* Non-zero return means perf throttled the event: stop it. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
766 | ||
c1320790 SP |
767 | static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc, |
768 | struct arm_pmu *cpu_pmu) | |
769 | { | |
770 | int idx; | |
771 | ||
772 | for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) { | |
773 | if (!test_and_set_bit(idx, cpuc->used_mask)) | |
774 | return idx; | |
775 | } | |
776 | return -EAGAIN; | |
777 | } | |
778 | ||
/*
 * Claim an adjacent even/odd pair of counters for a 64-bit (chained)
 * event. Returns the odd (high) counter's index, or -EAGAIN if no
 * free pair exists. The odd counter is claimed first and released
 * again if its even partner is unavailable.
 */
static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the Odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}
799 | ||
/*
 * Pick a counter for @event: the dedicated cycle counter for a
 * CPU_CYCLES event when it is free (falling through to the general
 * counters otherwise), then a chained pair for 64-bit events or a
 * single general-purpose counter.
 */
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use events counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
821 | ||
7dfc8db1 | 822 | static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, |
c1320790 | 823 | struct perf_event *event) |
7dfc8db1 | 824 | { |
c1320790 SP |
825 | int idx = event->hw.idx; |
826 | ||
827 | clear_bit(idx, cpuc->used_mask); | |
828 | if (armv8pmu_event_is_chained(event)) | |
829 | clear_bit(idx - 1, cpuc->used_mask); | |
7dfc8db1 SP |
830 | } |
831 | ||
/*
 * Add an event filter to a given event.
 *
 * Translates the perf exclude_* attributes into exception-level
 * include/exclude bits stored in @event->config_base, which is later
 * used to build the event-type register value. exclude_idle cannot be
 * honoured by the hardware, so it is rejected with -EPERM.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
878 | ||
ca2b4972 WD |
879 | static int armv8pmu_filter_match(struct perf_event *event) |
880 | { | |
881 | unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT; | |
882 | return evtype != ARMV8_PMUV3_PERFCTR_CHAIN; | |
883 | } | |
884 | ||
/*
 * Bring this CPU's PMU into a known state: disable every counter and
 * its interrupt, drop any KVM guest/host event toggles, then reset the
 * event and cycle counters via PMCR. Runs via cross-call with @info
 * pointing at the arm_pmu.
 */
static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}
906 | ||
/*
 * Map a perf event to a hardware event number. The common architected
 * PMUv3 tables are tried first, and the result is accepted only if the
 * PMU advertises that event in its PMCEID bitmap; otherwise fall back
 * to the implementation-specific @extra_event_map/@extra_cache_map.
 */
static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	/* 64-bit events need chained counters; flag them for scheduling. */
	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
934 | ||
/* Generic PMUv3: map using only the architected event tables. */
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}
939 | ||
/* Cortex-A53: architected events plus the A53 cache-event table. */
static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}
944 | ||
/* Cortex-A57: architected events plus the A57 cache-event table. */
static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}
949 | ||
/* Cortex-A73: architected events plus the A73 cache-event table. */
static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}
954 | ||
/* Cavium ThunderX: architected events plus its cache-event table. */
static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}
960 | ||
/* Broadcom Vulcan: architected events plus its cache-event table. */
static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}
966 | ||
/*
 * Cross-call payload for PMU probing: @pmu is filled in on the target
 * CPU; @present is set there only if a usable PMUv3 was found.
 */
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};
971 | ||
/*
 * Runs on a CPU in the PMU's supported set: checks ID_AA64DFR0_EL1 for
 * a PMUv3 implementation, derives the number of counters from
 * PMCR_EL0.N, and records the architected events advertised by
 * PMCEID{0,1}_EL0 into the pmceid bitmaps.
 */
static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	/* 0xf == IMPLEMENTATION DEFINED PMU, 0 == no PMU: both unusable. */
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	/* Low 32-bit halves: the base common events. */
	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	/* High 32-bit halves: the extended common events. */
	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
1008 | ||
/*
 * Probe the PMU by running __armv8pmu_probe_pmu() on any CPU from the
 * PMU's supported set. Returns 0 when a PMUv3 implementation was
 * found, -ENODEV when not, or the cross-call error.
 */
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}
1025 | ||
/*
 * Common ARMv8 setup: probe the hardware, then install the arm_pmu
 * callbacks shared by every PMUv3 implementation. Per-implementation
 * init functions fill in name/map_event/attr_groups afterwards.
 */
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
	cpu_pmu->filter_match = armv8pmu_filter_match;

	return 0;
}
1047 | ||
1048 | static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) | |
1049 | { | |
f1b36dcb MR |
1050 | int ret = armv8_pmu_init(cpu_pmu); |
1051 | if (ret) | |
1052 | return ret; | |
1053 | ||
6475b2d8 MR |
1054 | cpu_pmu->name = "armv8_pmuv3"; |
1055 | cpu_pmu->map_event = armv8_pmuv3_map_event; | |
569de902 MR |
1056 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1057 | &armv8_pmuv3_events_attr_group; | |
1058 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1059 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1060 | |
1061 | return 0; | |
ac82d127 MR |
1062 | } |
1063 | ||
e884f80c JT |
1064 | static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu) |
1065 | { | |
1066 | int ret = armv8_pmu_init(cpu_pmu); | |
1067 | if (ret) | |
1068 | return ret; | |
1069 | ||
1070 | cpu_pmu->name = "armv8_cortex_a35"; | |
1071 | cpu_pmu->map_event = armv8_a53_map_event; | |
1072 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = | |
1073 | &armv8_pmuv3_events_attr_group; | |
1074 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1075 | &armv8_pmuv3_format_attr_group; | |
1076 | ||
1077 | return 0; | |
1078 | } | |
1079 | ||
ac82d127 MR |
1080 | static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) |
1081 | { | |
f1b36dcb MR |
1082 | int ret = armv8_pmu_init(cpu_pmu); |
1083 | if (ret) | |
1084 | return ret; | |
1085 | ||
ac82d127 MR |
1086 | cpu_pmu->name = "armv8_cortex_a53"; |
1087 | cpu_pmu->map_event = armv8_a53_map_event; | |
569de902 MR |
1088 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1089 | &armv8_pmuv3_events_attr_group; | |
1090 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1091 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1092 | |
1093 | return 0; | |
03089688 | 1094 | } |
03089688 | 1095 | |
62a4dda9 MR |
1096 | static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) |
1097 | { | |
f1b36dcb MR |
1098 | int ret = armv8_pmu_init(cpu_pmu); |
1099 | if (ret) | |
1100 | return ret; | |
1101 | ||
62a4dda9 MR |
1102 | cpu_pmu->name = "armv8_cortex_a57"; |
1103 | cpu_pmu->map_event = armv8_a57_map_event; | |
569de902 MR |
1104 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1105 | &armv8_pmuv3_events_attr_group; | |
1106 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1107 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1108 | |
1109 | return 0; | |
62a4dda9 MR |
1110 | } |
1111 | ||
5d7ee877 WD |
1112 | static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) |
1113 | { | |
f1b36dcb MR |
1114 | int ret = armv8_pmu_init(cpu_pmu); |
1115 | if (ret) | |
1116 | return ret; | |
1117 | ||
5d7ee877 WD |
1118 | cpu_pmu->name = "armv8_cortex_a72"; |
1119 | cpu_pmu->map_event = armv8_a57_map_event; | |
569de902 MR |
1120 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1121 | &armv8_pmuv3_events_attr_group; | |
1122 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1123 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1124 | |
1125 | return 0; | |
5d7ee877 WD |
1126 | } |
1127 | ||
5561b6c5 JT |
1128 | static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu) |
1129 | { | |
1130 | int ret = armv8_pmu_init(cpu_pmu); | |
1131 | if (ret) | |
1132 | return ret; | |
1133 | ||
1134 | cpu_pmu->name = "armv8_cortex_a73"; | |
1135 | cpu_pmu->map_event = armv8_a73_map_event; | |
1136 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = | |
1137 | &armv8_pmuv3_events_attr_group; | |
1138 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1139 | &armv8_pmuv3_format_attr_group; | |
1140 | ||
1141 | return 0; | |
1142 | } | |
1143 | ||
d0aa2bff JG |
1144 | static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) |
1145 | { | |
f1b36dcb MR |
1146 | int ret = armv8_pmu_init(cpu_pmu); |
1147 | if (ret) | |
1148 | return ret; | |
1149 | ||
d0aa2bff JG |
1150 | cpu_pmu->name = "armv8_cavium_thunder"; |
1151 | cpu_pmu->map_event = armv8_thunder_map_event; | |
569de902 MR |
1152 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1153 | &armv8_pmuv3_events_attr_group; | |
1154 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1155 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1156 | |
1157 | return 0; | |
d0aa2bff JG |
1158 | } |
1159 | ||
201a72b2 AK |
1160 | static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) |
1161 | { | |
f1b36dcb MR |
1162 | int ret = armv8_pmu_init(cpu_pmu); |
1163 | if (ret) | |
1164 | return ret; | |
1165 | ||
201a72b2 AK |
1166 | cpu_pmu->name = "armv8_brcm_vulcan"; |
1167 | cpu_pmu->map_event = armv8_vulcan_map_event; | |
569de902 MR |
1168 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = |
1169 | &armv8_pmuv3_events_attr_group; | |
1170 | cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = | |
1171 | &armv8_pmuv3_format_attr_group; | |
f1b36dcb MR |
1172 | |
1173 | return 0; | |
201a72b2 AK |
1174 | } |
1175 | ||
/* Device-tree "compatible" strings mapped to per-implementation init hooks. */
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
	{},
};
1187 | ||
/* Platform-bus probe: match against the DT table and register the PMU. */
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
1192 | ||
/* Platform driver for DT-probed PMUs; runtime unbind is suppressed. */
static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe = armv8_pmu_device_probe,
};
1201 | ||
f00fa5f4 MR |
1202 | static int __init armv8_pmu_driver_init(void) |
1203 | { | |
1204 | if (acpi_disabled) | |
1205 | return platform_driver_register(&armv8_pmu_driver); | |
1206 | else | |
1207 | return arm_pmu_acpi_probe(armv8_pmuv3_init); | |
1208 | } | |
1209 | device_initcall(armv8_pmu_driver_init) | |

/*
 * Export time-conversion parameters (mult/shift/offset derived from the
 * architected timer rate) to the perf mmap page, so userspace can
 * convert raw timestamps to nanoseconds.
 */
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}