// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
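
/*
 * Illustrative only: with the 3-cell form above, a consumer's DT interrupt
 * specifiers could look like the following (the consumer node itself is
 * hypothetical; AIC_IRQ/AIC_FIQ and the timer indices come from
 * <dt-bindings/interrupt-controller/apple-aic.h>):
 *
 *	interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
 *		     <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
 */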

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_HW		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_TYPE		GENMASK(31, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_HW	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000
#define AIC_SW_SET		0x4000
#define AIC_SW_CLR		0x4080
#define AIC_MASK_SET		0x4100
#define AIC_MASK_CLR		0x4180

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
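
/*
 * The explicit per-CPU register views above are laid out as 0x80-byte
 * blocks ((cpu) << 7); the offsets within each block appear to mirror the
 * "this CPU" IPI registers in the 0x2000 region.
 */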

#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
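
/*
 * Worked example: for hwirq 70, MASK_REG(70) = 4 * (70 >> 5) = 8 (the
 * third 32-bit mask word) and MASK_BIT(70) = BIT(70 & 31) = BIT(6).
 */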

/*
 * IMP-DEF sysregs that control FIQ sources
 * Note: sysreg-based IPIs are not supported yet.
 */

/* Core PMC control register */
#define SYS_IMP_APL_PMCR0_EL1		sys_reg(3, 1, 15, 0, 0)
#define PMCR0_IMODE			GENMASK(10, 8)
#define PMCR0_IMODE_OFF			0
#define PMCR0_IMODE_PMI			1
#define PMCR0_IMODE_AIC			2
#define PMCR0_IMODE_HALT		3
#define PMCR0_IMODE_FIQ			4
#define PMCR0_IACT			BIT(11)

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

#define AIC_NR_FIQ		4
#define AIC_NR_SWIPI		32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */

#define AIC_TMR_EL0_PHYS	AIC_TMR_HV_PHYS
#define AIC_TMR_EL0_VIRT	AIC_TMR_HV_VIRT
#define AIC_TMR_EL02_PHYS	AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT	AIC_TMR_GUEST_VIRT
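
/*
 * For example, when the kernel runs at EL1, a DT request for
 * AIC_TMR_GUEST_PHYS is remapped to hwirq index AIC_TMR_EL0_PHYS (the
 * guest owns the non-redirected _EL0 registers), and requests for the HV
 * timers fail with -ENOENT; see aic_irq_domain_translate().
 */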

struct aic_irq_chip {
	void __iomem *base;
	struct irq_domain *hw_domain;
	struct irq_domain *ipi_domain;
	int nr_hw;
	int ipi_hwirq;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}
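
/*
 * These accessors are deliberately _relaxed: the one place that needs
 * ordering against DMA, the event read in aic_handle_irq(), uses a full
 * readl() instead.
 */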

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
		     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(irqd_to_hwirq(d)),
		     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->base + AIC_EVENT);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_HW)
			handle_domain_irq(aic_irqc->hw_domain, irq, regs);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. This should never trigger if KVM is working
	 * properly, because it will have already taken care of clearing it
	 * on guest exit before this handler runs.
	 */
	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	return irqd_to_hwirq(d) - ic->nr_hw;
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}
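
/*
 * A timer FIQ is considered to be firing when the timer is enabled, its
 * interrupt is not masked at the timer itself (IMASK clear), and its
 * interrupt status bit is set:
 */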

#define TIMER_FIRING(x)                                                        \
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |           \
		 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		pr_err_ratelimited("Fast IPI fired. Acking.\n");
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		handle_domain_irq(aic_irqc->hw_domain,
				  aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		handle_domain_irq(aic_irqc->hw_domain,
				  aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			handle_domain_irq(aic_irqc->hw_domain,
					  aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			handle_domain_irq(aic_irqc->hw_domain,
					  aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
	}

	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
		/*
		 * Not supported yet, let's figure out how to handle this when
		 * we implement these proprietary performance counters. For now,
		 * just mask it and move on.
		 */
		pr_err_ratelimited("PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
				   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;

	if (hw < ic->nr_hw) {
		irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;

	if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
		return -EINVAL;

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (fwspec->param[1] >= ic->nr_hw)
			return -EINVAL;
		*hwirq = fwspec->param[1];
		break;
	case AIC_FIQ:
		if (fwspec->param[1] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = ic->nr_hw + fwspec->param[1];

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (fwspec->param[1]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate = aic_irq_domain_translate,
	.alloc = aic_irq_domain_alloc,
	.free = aic_irq_domain_free,
};

/*
 * IPI irqchip
 */
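
/*
 * vIPI bookkeeping, as used below: for each CPU, aic_vipi_flag holds the
 * pending vIPI bits and aic_vipi_enable the unmasked ones. A sender sets
 * the target's flag bit and raises the hardware IPI only if the vIPI was
 * not already pending and is currently enabled; the receiver atomically
 * collects (flag & enable) and dispatches each set bit.
 */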

static void aic_ipi_mask(struct irq_data *d)
{
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	/* No specific ordering requirements needed here. */
	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
}

static void aic_ipi_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

	/*
	 * The atomic_or() above must complete before the atomic_read()
	 * below to avoid racing aic_ipi_send_mask().
	 */
	smp_mb__after_atomic();

	/*
	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
	 * No barriers needed here since this is a self-IPI.
	 */
	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
		aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
}

static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));
	u32 send = 0;
	int cpu;
	unsigned long pending;

	for_each_cpu(cpu, mask) {
		/*
		 * This sequence is the mirror of the one in aic_ipi_unmask();
		 * see the comment there. Additionally, release semantics
		 * ensure that the vIPI flag set is ordered after any shared
		 * memory accesses that precede it. This therefore also pairs
		 * with the atomic_fetch_andnot in aic_handle_ipi().
		 */
		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

		/*
		 * The atomic_fetch_or_release() above must complete before the
		 * atomic_read() below to avoid racing aic_ipi_unmask().
		 */
		smp_mb__after_atomic();

		if (!(pending & irq_bit) &&
		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
			send |= AIC_IPI_SEND_CPU(cpu);
	}

	/*
	 * The flag writes must complete before the physical IPI is issued
	 * to another CPU. This is implied by the control dependency on
	 * the result of the atomic_read() above, which is itself already
	 * ordered after the vIPI flag write by smp_mb__after_atomic().
	 */
	if (send)
		aic_ic_write(ic, AIC_IPI_SEND, send);
}

static struct irq_chip ipi_chip = {
	.name = "AIC-IPI",
	.irq_mask = aic_ipi_mask,
	.irq_unmask = aic_ipi_unmask,
	.ipi_send_mask = aic_ipi_send_mask,
};

/*
 * IPI IRQ domain
 */

static void aic_handle_ipi(struct pt_regs *regs)
{
	int i;
	unsigned long enabled, firing;

	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);

	/*
	 * The mask read does not need to be ordered. Only we can change
	 * our own mask anyway, so no races are possible here, as long as
	 * we are properly in the interrupt handler (which is covered by
	 * the barrier that is part of the top-level AIC handler's readl()).
	 */
	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

	/*
	 * Clear the IPIs we are about to handle. This pairs with the
	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
	 * before IPI handling code (to avoid races handling vIPIs before they
	 * are signaled). The former is taken care of by the release semantics
	 * of the write portion, while the latter is taken care of by the
	 * acquire semantics of the read portion.
	 */
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
		handle_domain_irq(aic_irqc->ipi_domain, i, regs);

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs, void *args)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops aic_ipi_domain_ops = {
	.alloc = aic_ipi_alloc,
	.free = aic_ipi_free,
};

static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	struct irq_domain *ipi_domain;
	int base_ipi;

	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
					      &aic_ipi_domain_ops, irqc);
	if (WARN_ON(!ipi_domain))
		return -ENODEV;

	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);

	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
					   NUMA_NO_NODE, NULL, false, NULL);

	if (WARN_ON(base_ipi <= 0)) {
		irq_domain_remove(ipi_domain);
		return -ENODEV;
	}

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	irqc->ipi_domain = ipi_domain;

	return 0;
}
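
/*
 * Per-CPU initialization, run on each CPU as it is brought up via the
 * CPUHP_AP_IRQ_APPLE_AIC_STARTING hotplug state registered below in
 * aic_of_ic_init().
 */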

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	/*
	 * Make sure the kernel's idea of logical CPU order is the same as
	 * AIC's. If we ever end up with a mismatch here, we will have to
	 * introduce a mapping table similar to what other irqchip drivers do.
	 */
	WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

	/*
	 * Always keep IPIs unmasked at the hardware level (except auto-masking
	 * by AIC during processing). We manage masks at the vIPI level.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
	aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i;
	void __iomem *regs;
	u32 info;
	struct aic_irq_chip *irqc;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	aic_irqc = irqc;
	irqc->base = regs;

	info = aic_ic_read(irqc, AIC_INFO);
	irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);

	irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						   irqc->nr_hw + AIC_NR_FIQ,
						   &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain)) {
		iounmap(irqc->base);
		kfree(irqc);
		return -ENODEV;
	}

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node)) {
		irq_domain_remove(irqc->hw_domain);
		iounmap(irqc->base);
		kfree(irqc);
		return -ENODEV;
	}

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
		aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
	for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
		aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
	for (i = 0; i < irqc->nr_hw; i++)
		aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts\n");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
		irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;
}

IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);