/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

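/*
 * Per-CPU bookkeeping: the logical PMU each CPU has been matched with
 * (if any), and the Linux IRQ parsed from that CPU's MADT GICC
 * performance interrupt entry.
 */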
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

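/*
 * Map a CPU's PMU interrupt, as described by its MADT GICC entry, to a
 * Linux IRQ via the GSI layer. Returns the Linux IRQ, or a negative
 * errno on failure; a return of 0 means no PMU interrupt is available.
 */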
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;
	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

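/* Undo arm_pmu_acpi_register_irq() for a CPU, releasing its GSI. */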
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}

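/*
 * Register the PMU GSI for every possible CPU up front. On any failure,
 * unwind all the registrations performed so far.
 */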
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		per_cpu(pmu_irqs, cpu) = irq;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

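/*
 * Find the logical PMU already allocated for another CPU with the same
 * MIDR, or allocate a fresh one if this is the first CPU of its type.
 */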
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

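	/* CPUs sharing a MIDR value share a logical PMU */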
	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	per_cpu(probed_pmus, cpu) = pmu;

	/*
	 * Log and request the IRQ so the core arm_pmu code can manage it. In
	 * some situations (e.g. mismatched PPIs), we may fail to request the
	 * IRQ. However, it may be too late for us to do anything about it.
	 * The common ARM PMU code will log a warning in this case.
	 */
	hw_events = pmu->hw_events;
	per_cpu(hw_events->irq, cpu) = irq;
	armpmu_request_irq(pmu, cpu);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_init().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

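/*
 * Called by the CPU PMU driver with its init_fn; probes, names, and
 * registers each logical PMU discovered so far.
 */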
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	if (acpi_disabled)
		return 0;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

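		/*
		 * Give each logical PMU a distinct name by appending an
		 * index to the name the driver chose (e.g. "armv8_pmuv3_0").
		 */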
		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			return ret;
		}
	}

	return 0;
}

static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	/*
	 * We can't request IRQs yet, since we don't know the cookie value
	 * until we know which CPUs share the same logical PMU. We'll handle
	 * that in arm_pmu_acpi_cpu_starting().
	 */
	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

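	/* Associate each CPU with a logical PMU as it comes online */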
	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)