// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */

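/*
 * For illustration only (not taken from any real device tree), a consumer
 * node could request hwirq 0x12 and the virtual HV timer FIQ like this,
 * using the constants from the dt-bindings header included below:
 *
 *	interrupts = <AIC_IRQ 0x12 IRQ_TYPE_LEVEL_HIGH>,
 *		     <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
 */
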
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_HW		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_TYPE		GENMASK(31, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_HW	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000
#define AIC_SW_SET		0x4000
#define AIC_SW_CLR		0x4080
#define AIC_MASK_SET		0x4100
#define AIC_MASK_CLR		0x4180

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))

#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))

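/*
 * Worked example for the two helpers above: hwirq 42 lives in the second
 * 32-bit mask word, so MASK_REG(42) == 4 (the byte offset of that word) and
 * MASK_BIT(42) == BIT(10); masking it writes BIT(10) to AIC_MASK_SET + 4.
 */
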
/*
 * IMP-DEF sysregs that control FIQ sources
 * Note: sysreg-based IPIs are not supported yet.
 */

/* Core PMC control register */
#define SYS_IMP_APL_PMCR0_EL1		sys_reg(3, 1, 15, 0, 0)
#define PMCR0_IMODE			GENMASK(10, 8)
#define PMCR0_IMODE_OFF			0
#define PMCR0_IMODE_PMI			1
#define PMCR0_IMODE_AIC			2
#define PMCR0_IMODE_HALT		3
#define PMCR0_IMODE_FIQ			4
#define PMCR0_IACT			BIT(11)

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

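/*
 * Number of software-generated IPIs that are multiplexed onto the single
 * hardware "other" IPI; see aic_ipi_send_mask() and aic_handle_ipi() below.
 */
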
#define AIC_NR_SWIPI		32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */

#define AIC_TMR_EL0_PHYS	AIC_TMR_HV_PHYS
#define AIC_TMR_EL0_VIRT	AIC_TMR_HV_VIRT
#define AIC_TMR_EL02_PHYS	AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT	AIC_TMR_GUEST_VIRT

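/*
 * In other words: with VHE, the kernel owns the _EL0 timer registers (which
 * then serve as the HV timers) and reaches the guest timers via _EL02.
 * Without VHE, the guest timers are remapped onto the _EL0 slots, and the HV
 * timers are unusable from EL1 (aic_irq_domain_translate() rejects them).
 */
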
struct aic_irq_chip {
	void __iomem *base;
	struct irq_domain *hw_domain;
	struct irq_domain *ipi_domain;
	int nr_hw;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

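/*
 * MMIO register accessors. The relaxed variants are sufficient here; the one
 * read that must be ordered against DMA, the AIC_EVENT read in
 * aic_handle_irq(), deliberately uses a non-relaxed readl() instead.
 */
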
static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
		     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(d->hwirq),
		     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->base + AIC_EVENT);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_HW)
			handle_domain_irq(aic_irqc->hw_domain, irq, regs);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. This should never trigger if KVM is working
	 * properly, because it will have already taken care of clearing it
	 * on guest exit before this handler runs.
	 */
	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	return irqd_to_hwirq(d) - ic->nr_hw;
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}

#define TIMER_FIRING(x)                                                        \
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |           \
		 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

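/*
 * That is, a timer is considered to be firing when it is enabled, its
 * interrupt is not masked (IMASK clear), and its interrupt status is set.
 */
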
static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		pr_err_ratelimited("Fast IPI fired. Acking.\n");
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		handle_domain_irq(aic_irqc->hw_domain,
				  aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		handle_domain_irq(aic_irqc->hw_domain,
				  aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			handle_domain_irq(aic_irqc->hw_domain,
					  aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			handle_domain_irq(aic_irqc->hw_domain,
					  aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
	}

	if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
			(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
		/*
		 * Not supported yet, let's figure out how to handle this when
		 * we implement these proprietary performance counters. For now,
		 * just mask it and move on.
		 */
		pr_err_ratelimited("PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
				   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;

	if (hw < ic->nr_hw) {
		irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;

	if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
		return -EINVAL;

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (fwspec->param[1] >= ic->nr_hw)
			return -EINVAL;
		*hwirq = fwspec->param[1];
		break;
	case AIC_FIQ:
		if (fwspec->param[1] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = ic->nr_hw + fwspec->param[1];

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (fwspec->param[1]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate = aic_irq_domain_translate,
	.alloc = aic_irq_domain_alloc,
	.free = aic_irq_domain_free,
};

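/*
 * IPI irqchip
 *
 * The vIPI bookkeeping below (a per-CPU "flag" word of pending vIPIs plus a
 * per-CPU "enable" mask, both manipulated atomically) is what funnels the
 * AIC_NR_SWIPI software IPIs into the single hardware "other" IPI, as
 * described at the top of this file.
 */
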
static void aic_ipi_mask(struct irq_data *d)
{
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	/* No specific ordering requirements needed here. */
	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
}

static void aic_ipi_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

	/*
	 * The atomic_or() above must complete before the atomic_read()
	 * below to avoid racing aic_ipi_send_mask().
	 */
	smp_mb__after_atomic();

	/*
	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
	 * No barriers needed here since this is a self-IPI.
	 */
	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
		aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
}

static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));
	u32 send = 0;
	int cpu;
	unsigned long pending;

	for_each_cpu(cpu, mask) {
		/*
		 * This sequence is the mirror of the one in aic_ipi_unmask();
		 * see the comment there. Additionally, release semantics
		 * ensure that the vIPI flag set is ordered after any shared
		 * memory accesses that precede it. This therefore also pairs
		 * with the atomic_fetch_andnot in aic_handle_ipi().
		 */
		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

		/*
		 * The atomic_fetch_or_release() above must complete before the
		 * atomic_read() below to avoid racing aic_ipi_unmask().
		 */
		smp_mb__after_atomic();

		if (!(pending & irq_bit) &&
		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
			send |= AIC_IPI_SEND_CPU(cpu);
	}

	/*
	 * The flag writes must complete before the physical IPI is issued
	 * to another CPU. This is implied by the control dependency on
	 * the result of atomic_read_acquire() above, which is itself
	 * already ordered after the vIPI flag write.
	 */
	if (send)
		aic_ic_write(ic, AIC_IPI_SEND, send);
}

static struct irq_chip ipi_chip = {
	.name = "AIC-IPI",
	.irq_mask = aic_ipi_mask,
	.irq_unmask = aic_ipi_unmask,
	.ipi_send_mask = aic_ipi_send_mask,
};

static void aic_handle_ipi(struct pt_regs *regs)
{
	int i;
	unsigned long enabled, firing;

	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);

	/*
	 * The mask read does not need to be ordered. Only we can change
	 * our own mask anyway, so no races are possible here, as long as
	 * we are properly in the interrupt handler (which is covered by
	 * the barrier that is part of the top-level AIC handler's readl()).
	 */
	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

	/*
	 * Clear the IPIs we are about to handle. This pairs with the
	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
	 * before IPI handling code (to avoid races handling vIPIs before they
	 * are signaled). The former is taken care of by the release semantics
	 * of the write portion, while the latter is taken care of by the
	 * acquire semantics of the read portion.
	 */
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
		handle_domain_irq(aic_irqc->ipi_domain, i, regs);

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs, void *args)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops aic_ipi_domain_ops = {
	.alloc = aic_ipi_alloc,
	.free = aic_ipi_free,
};

static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	struct irq_domain *ipi_domain;
	int base_ipi;

	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
					      &aic_ipi_domain_ops, irqc);
	if (WARN_ON(!ipi_domain))
		return -ENODEV;

	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);

	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
					   NUMA_NO_NODE, NULL, false, NULL);

	if (WARN_ON(!base_ipi)) {
		irq_domain_remove(ipi_domain);
		return -ENODEV;
	}

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	irqc->ipi_domain = ipi_domain;

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	/*
	 * Make sure the kernel's idea of logical CPU order is the same as AIC's.
	 * If we ever end up with a mismatch here, we will have to introduce
	 * a mapping table similar to what other irqchip drivers do.
	 */
	WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

	/*
	 * Always keep IPIs unmasked at the hardware level (except auto-masking
	 * by AIC during processing). We manage masks at the vIPI level.
	 */
	aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
	aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i;
	void __iomem *regs;
	u32 info;
	struct aic_irq_chip *irqc;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc)
		return -ENOMEM;

	aic_irqc = irqc;
	irqc->base = regs;

	info = aic_ic_read(irqc, AIC_INFO);
	irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);

	irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						   irqc->nr_hw + AIC_NR_FIQ,
						   &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain)) {
		iounmap(irqc->base);
		kfree(irqc);
		return -ENODEV;
	}

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node)) {
		irq_domain_remove(irqc->hw_domain);
		iounmap(irqc->base);
		kfree(irqc);
		return -ENODEV;
	}

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
		aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
	for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
		aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
	for (i = 0; i < irqc->nr_hw; i++)
		aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts\n");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
		irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;
}

IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);