drivers/irqchip/irq-mips-cpu.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2001 Ralf Baechle
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 *
 * This file defines the irq handler for MIPS CPU interrupts.
 */

/*
 * Almost all MIPS CPUs define 8 interrupt sources. They are typically
 * level triggered (i.e., they cannot be cleared from the CPU; they must
 * be cleared from the device).
 *
 * The first two are software interrupts (i.e. not exposed as pins) which
 * may be used for IPIs in multi-threaded single-core systems.
 *
 * The last one is usually the CPU timer interrupt if the counter register
 * is present; on older CPUs with an external FPU it is, by convention,
 * the FPU exception interrupt.
 */
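
/*
 * In the code below, Status.IM0..IM7 act as the per-source enable bits
 * (set_c0_status()/clear_c0_status() with IE_SW0 << hwirq) and
 * Cause.IP0..IP7 as the per-source pending bits (C_SW0 << hwirq).
 * hwirqs 0 and 1 are the two software interrupts; 2..7 are the hardware
 * interrupt inputs, with 7 conventionally the CPU timer.
 */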
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>

static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;

static inline void unmask_mips_irq(struct irq_data *d)
{
        set_c0_status(IE_SW0 << d->hwirq);
        irq_enable_hazard();
}

static inline void mask_mips_irq(struct irq_data *d)
{
        clear_c0_status(IE_SW0 << d->hwirq);
        irq_disable_hazard();
}

static struct irq_chip mips_cpu_irq_controller = {
        .name           = "MIPS",
        .irq_ack        = mask_mips_irq,
        .irq_mask       = mask_mips_irq,
        .irq_mask_ack   = mask_mips_irq,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
        .irq_disable    = mask_mips_irq,
        .irq_enable     = unmask_mips_irq,
};

/*
 * Basically the same as above but taking care of all the MT stuff
 */

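/*
 * On MT cores the pending software interrupt bit in Cause is cleared
 * before unmasking. dvpe() briefly disables multi-VPE execution so the
 * Cause update cannot race with other VPEs; evpe() restores the previous
 * state.
 */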
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
        unsigned int vpflags = dvpe();

        clear_c0_cause(C_SW0 << d->hwirq);
        evpe(vpflags);
        unmask_mips_irq(d);
        return 0;
}

/*
 * While we ack the interrupt, interrupts are disabled and thus we don't
 * need to deal with concurrency issues. Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
        unsigned int vpflags = dvpe();
        clear_c0_cause(C_SW0 << d->hwirq);
        evpe(vpflags);
        mask_mips_irq(d);
}

#ifdef CONFIG_GENERIC_IRQ_IPI

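/*
 * Raise an IPI by setting the target VPE's Cause.IP bit for the chosen
 * software interrupt. settc() selects the destination thread context, so
 * this only works for VPEs sharing the local core (hence the
 * cpus_are_siblings() warning below).
 */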
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long flags;
        int vpflags;

        local_irq_save(flags);

        /* We can only send IPIs to VPEs within the local core */
        WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

        vpflags = dvpe();
        settc(cpu_vpe_id(&cpu_data[cpu]));
        write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
        evpe(vpflags);

        local_irq_restore(flags);
}

#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct irq_chip mips_mt_cpu_irq_controller = {
        .name           = "MIPS",
        .irq_startup    = mips_mt_cpu_irq_startup,
        .irq_ack        = mips_mt_cpu_irq_ack,
        .irq_mask       = mask_mips_irq,
        .irq_mask_ack   = mips_mt_cpu_irq_ack,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
        .irq_disable    = mask_mips_irq,
        .irq_enable     = unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
        .ipi_send_single = mips_mt_send_ipi,
#endif
};

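/*
 * Default dispatcher: service every interrupt that is both pending in
 * Cause.IP and enabled in Status.IM, highest number first via fls().
 * Declared __weak so platforms can provide their own plat_irq_dispatch().
 */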
asmlinkage void __weak plat_irq_dispatch(void)
{
        unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
        unsigned int virq;
        int irq;

        if (!pending) {
                spurious_interrupt();
                return;
        }

        pending >>= CAUSEB_IP;
        while (pending) {
                irq = fls(pending) - 1;
                if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
                        virq = irq_linear_revmap(ipi_domain, irq);
                else
                        virq = irq_linear_revmap(irq_domain, irq);
                do_IRQ(virq);
                pending &= ~BIT(irq);
        }
}

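/*
 * Map a CPU hwirq to a Linux virq. The two software interrupts get the
 * MT-aware chip when the CPU implements the MT ASE (they are then used as
 * IPIs); everything is handled as a per-CPU interrupt since the CPU
 * interrupt controller is local to each CPU.
 */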
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
                             irq_hw_number_t hw)
{
        struct irq_chip *chip;

        if (hw < 2 && cpu_has_mipsmt) {
                /* Software interrupts are used for MT/CMT IPI */
                chip = &mips_mt_cpu_irq_controller;
        } else {
                chip = &mips_cpu_irq_controller;
        }

        if (cpu_has_vint)
                set_vi_handler(hw, plat_irq_dispatch);

        irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

        return 0;
}

static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
        .map = mips_cpu_intc_map,
        .xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

struct cpu_ipi_domain_state {
        DECLARE_BITMAP(allocated, 2);
};

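/*
 * Allocate IPIs from the two software interrupts, tracking which of the
 * two hwirqs are in use in the per-domain bitmap. Each allocated virq is
 * bound to the MT chip so that ipi_send_single() ends up in
 * mips_mt_send_ipi().
 */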
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs, void *arg)
{
        struct cpu_ipi_domain_state *state = domain->host_data;
        unsigned int i, hwirq;
        int ret;

        for (i = 0; i < nr_irqs; i++) {
                hwirq = find_first_zero_bit(state->allocated, 2);
                if (hwirq == 2)
                        return -EBUSY;
                bitmap_set(state->allocated, hwirq, 1);

                ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
                                                    &mips_mt_cpu_irq_controller,
                                                    NULL);
                if (ret)
                        return ret;

                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
                              enum irq_domain_bus_token bus_token)
{
        bool is_ipi;

        switch (bus_token) {
        case DOMAIN_BUS_IPI:
                is_ipi = d->bus_token == bus_token;
                return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
        default:
                return 0;
        }
}

static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
        .alloc = mips_cpu_ipi_alloc,
        .match = mips_cpu_ipi_match,
};

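/*
 * Register a hierarchical IPI domain on top of the CPU interrupt domain,
 * covering the two software interrupts. IRQ_DOMAIN_FLAG_IPI_SINGLE means
 * a single virq/hwirq is shared by all target CPUs rather than one per
 * CPU.
 */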
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
        struct cpu_ipi_domain_state *ipi_domain_state;

        ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
        ipi_domain = irq_domain_add_hierarchy(irq_domain,
                                              IRQ_DOMAIN_FLAG_IPI_SINGLE,
                                              2, of_node,
                                              &mips_cpu_ipi_chip_ops,
                                              ipi_domain_state);
        if (!ipi_domain)
                panic("Failed to add MIPS CPU IPI domain");
        irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}

#endif /* !CONFIG_GENERIC_IRQ_IPI */

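/*
 * Common init path for both the legacy and the DT probe: mask and clear
 * all eight CPU interrupts, then register a legacy domain so that the
 * hwirqs keep the fixed virq range starting at MIPS_CPU_IRQ_BASE that
 * platform code expects.
 */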
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
        /* Mask interrupts. */
        clear_c0_status(ST0_IM);
        clear_c0_cause(CAUSEF_IP);

        irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
                                           &mips_cpu_intc_irq_domain_ops,
                                           NULL);
        if (!irq_domain)
                panic("Failed to add irqdomain for MIPS CPU");

        /*
         * Only proceed to register the software interrupt IPI implementation
         * for CPUs which implement the MIPS MT (multi-threading) ASE.
         */
        if (cpu_has_mipsmt)
                mips_cpu_register_ipi_domain(of_node);
}

void __init mips_cpu_irq_init(void)
{
        __mips_cpu_irq_init(NULL);
}

int __init mips_cpu_irq_of_init(struct device_node *of_node,
                                struct device_node *parent)
{
        __mips_cpu_irq_init(of_node);
        return 0;
}
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);
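
/*
 * For reference, a minimal device tree node matched by the
 * IRQCHIP_DECLARE() above typically looks like the following (a sketch
 * based on the standard mti,cpu-interrupt-controller binding; the node
 * name and label are illustrative):
 *
 *      cpuintc: interrupt-controller {
 *              #address-cells = <0>;
 *              #interrupt-cells = <1>;
 *              interrupt-controller;
 *              compatible = "mti,cpu-interrupt-controller";
 *      };
 */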