2 * Copyright 2016 IBM Corporation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/irq.h>
12 #include <linux/smp.h>
13 #include <linux/interrupt.h>
14 #include <linux/cpu.h>
19 #include <asm/errno.h>
23 #include <asm/kvm_ppc.h>
/* Shut the ICP down on this CPU: discard any IPI that may still be pending. */
static void icp_opal_teardown_cpu(void)
{
	/* Writing 0xff to the MFRR clears a pending IPI for this thread */
	opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
}
33 static void icp_opal_flush_ipi(void)
36 * We take the ipi irq but and never return so we need to EOI the IPI,
37 * but want to leave our priority 0.
39 * Should we check all the other interrupts too?
40 * Should we be flagging idle loop instead?
41 * Or creating some task to be scheduled?
43 if (opal_int_eoi((0x00 << 24) | XICS_IPI
) > 0)
44 force_external_irq_replay();
47 static unsigned int icp_opal_get_xirr(void)
49 unsigned int kvm_xirr
;
53 /* Handle an interrupt latched by KVM first */
54 kvm_xirr
= kvmppc_get_xics_latch();
59 rc
= opal_int_get_xirr(&hw_xirr
, false);
62 return be32_to_cpu(hw_xirr
);
65 static unsigned int icp_opal_get_irq(void)
71 xirr
= icp_opal_get_xirr();
72 vec
= xirr
& 0x00ffffff;
73 if (vec
== XICS_IRQ_SPURIOUS
)
76 irq
= irq_find_mapping(xics_host
, vec
);
82 /* We don't have a linux mapping, so have rtas mask it. */
83 xics_mask_unknown_vec(vec
);
85 /* We might learn about it later, so EOI it */
86 if (opal_int_eoi(xirr
) > 0)
87 force_external_irq_replay();
92 static void icp_opal_set_cpu_priority(unsigned char cppr
)
94 xics_set_base_cppr(cppr
);
95 opal_int_set_cppr(cppr
);
99 static void icp_opal_eoi(struct irq_data
*d
)
101 unsigned int hw_irq
= (unsigned int)irqd_to_hwirq(d
);
105 rc
= opal_int_eoi((xics_pop_cppr() << 24) | hw_irq
);
108 * EOI tells us whether there are more interrupts to fetch.
110 * Some HW implementations might not be able to send us another
111 * external interrupt in that case, so we force a replay.
114 force_external_irq_replay();
119 static void icp_opal_cause_ipi(int cpu
, unsigned long data
)
121 int hw_cpu
= get_hard_smp_processor_id(cpu
);
123 kvmppc_set_host_ipi(cpu
, 1);
124 opal_int_set_mfrr(hw_cpu
, IPI_PRIORITY
);
127 static irqreturn_t
icp_opal_ipi_action(int irq
, void *dev_id
)
129 int cpu
= smp_processor_id();
131 kvmppc_set_host_ipi(cpu
, 0);
132 opal_int_set_mfrr(get_hard_smp_processor_id(cpu
), 0xff);
134 return smp_ipi_demux();
138 * Called when an interrupt is received on an off-line CPU to
139 * clear the interrupt, so that the CPU can go back to nap mode.
141 void icp_opal_flush_interrupt(void)
147 xirr
= icp_opal_get_xirr();
148 vec
= xirr
& 0x00ffffff;
149 if (vec
== XICS_IRQ_SPURIOUS
)
151 if (vec
== XICS_IPI
) {
152 /* Clear pending IPI */
153 int cpu
= smp_processor_id();
154 kvmppc_set_host_ipi(cpu
, 0);
155 opal_int_set_mfrr(get_hard_smp_processor_id(cpu
), 0xff);
157 pr_err("XICS: hw interrupt 0x%x to offline cpu, "
159 xics_mask_unknown_vec(vec
);
162 /* EOI the interrupt */
163 } while (opal_int_eoi(xirr
) > 0);
166 #endif /* CONFIG_SMP */
168 static const struct icp_ops icp_opal_ops
= {
169 .get_irq
= icp_opal_get_irq
,
171 .set_priority
= icp_opal_set_cpu_priority
,
172 .teardown_cpu
= icp_opal_teardown_cpu
,
173 .flush_ipi
= icp_opal_flush_ipi
,
175 .ipi_action
= icp_opal_ipi_action
,
176 .cause_ipi
= icp_opal_cause_ipi
,
180 int icp_opal_init(void)
182 struct device_node
*np
;
184 np
= of_find_compatible_node(NULL
, NULL
, "ibm,opal-intc");
188 icp_ops
= &icp_opal_ops
;
190 printk("XICS: Using OPAL ICP fallbacks\n");