// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named smmuv3_pmcg_<phys_addr_page>, where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to a 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840.
 *
 * Filtering by stream id is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of the event StreamID.
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 *                       filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */
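/*
 * A further worked example of the span encoding above (illustrative only):
 * with filter_span=1, a filter_stream_id of 0x43 encodes Y = 3 (bit 2
 * clear, bits 1:0 set), so StreamIDs 0x40-0x47 all match.
 */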

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12

#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	bool global_filter;
	u32 global_filter_span;
	u32 global_filter_sid;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

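/*
 * Extract the event and filter fields from perf_event_attr. The bit
 * positions mirror the "format" attributes exported through sysfs below
 * (event=config:0-15, filter_stream_id=config1:0-31, filter_span=config1:32,
 * filter_enable=config1:33).
 */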
#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)	\
	static inline u32 get_##_name(struct perf_event *event)	\
	{								\
		return FIELD_GET(GENMASK_ULL(_end, _start),		\
				 event->attr._config);			\
	}								\

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

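/*
 * Event counters are accessed as 64-bit registers at an 8-byte stride when
 * the implementation has 64-bit counters (counter_mask extends beyond bit
 * 31), and as 32-bit registers at a 4-byte stride otherwise.
 */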
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* Handle wraparound: mask the delta to the implemented counter width. */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at < half_counter value and wraps, the current logic of
		 * handling the overflow may not work. It is expected that
		 * those platforms have the full 64 counter bits implemented
		 * so that such a possibility is remote (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value
		 * of the counter size, so that even in the case of extreme
		 * interrupt latency the counter will (hopefully) not wrap
		 * past its initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

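/*
 * With SMMU_PMCG_CFGR.SID_FILTER_TYPE set, the filter configured through
 * counter 0's EVTYPER/SMR applies to every counter, so an event may only
 * change the global settings while no other counters are in use.
 */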
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	/* Support individual filter settings */
	if (!smmu_pmu->global_filter) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Requested settings same as current global settings */
	if (span == smmu_pmu->global_filter_span &&
	    sid == smmu_pmu->global_filter_sid)
		return 0;

	if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
		return -EAGAIN;

	smmu_pmu_set_event_filter(event, 0, span, sid);
	smmu_pmu->global_filter_span = span;
	smmu_pmu->global_filter_sid = sid;
	return 0;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader)) {
		dev_dbg(dev, "Can't create mixed PMU group\n");
		return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling)) {
			dev_dbg(dev, "Can't create mixed PMU group\n");
			return -EINVAL;
		}
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config)			\
	PMU_EVENT_ATTR(name, smmu_event_attr_##name,	\
		       config, smmu_pmu_event_show)
SMMU_EVENT_ATTR(cycles, 0);
SMMU_EVENT_ATTR(transaction, 1);
SMMU_EVENT_ATTR(tlb_miss, 2);
SMMU_EVENT_ATTR(config_cache_miss, 3);
SMMU_EVENT_ATTR(trans_table_walk_access, 4);
SMMU_EVENT_ATTR(config_struct_access, 5);
SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);

static struct attribute *smmu_pmu_events[] = {
	&smmu_event_attr_cycles.attr.attr,
	&smmu_event_attr_transaction.attr.attr,
	&smmu_event_attr_tlb_miss.attr.attr,
	&smmu_event_attr_config_cache_miss.attr.attr,
	&smmu_event_attr_trans_table_walk_access.attr.attr,
	&smmu_event_attr_config_struct_access.attr.attr,
	&smmu_event_attr_pcie_ats_trans_rq.attr.attr,
	&smmu_event_attr_pcie_ats_trans_passed.attr.attr,
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

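/*
 * Overflow handling: read and clear the overflow status bits, then fold each
 * overflowed counter into event->count and re-arm its period.
 */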
static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0, *res_1;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= smmu_pmu_enable,
		.pmu_disable	= smmu_pmu_disable,
		.event_init	= smmu_pmu_event_init,
		.add		= smmu_pmu_event_add,
		.del		= smmu_pmu_event_del,
		.start		= smmu_pmu_event_start,
		.stop		= smmu_pmu_event_stop,
		.read		= smmu_pmu_event_read,
		.attr_groups	= smmu_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		res_1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		smmu_pmu->reloc_base = devm_ioremap_resource(dev, res_1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

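	/* CEID0/CEID1 form a 128-bit map of the implemented architected events */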
	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

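	/* CFGR.SIZE holds the counter width minus one (e.g. 31 for 32-bit counters) */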
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	smmu_pmu_get_acpi_options(smmu_pmu);

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
				      cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		return err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
					   "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	return platform_driver_register(&smmu_pmu_driver);
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");