/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);
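
/*
 * Illustrative sketch, paraphrased from the helpers in soc/arc/mcip.h (not
 * part of this file): all ARConnect programming below funnels through a
 * small aux-register command interface, roughly:
 *
 *      write_aux_reg(ARC_REG_MCIP_WDATA, data);    // __mcip_cmd_data() only
 *      write_aux_reg(ARC_REG_MCIP_CMD, (param << 8) | cmd);
 *      ret = read_aux_reg(ARC_REG_MCIP_READBACK);  // for status queries
 *
 * The sequence is not atomic, which is why every caller brackets it with
 * mcip_lock.
 */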

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

static void mcip_setup_per_cpu(int cpu)
{
        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
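
/*
 * Each CPU registers two IPI lines: IPI_IRQ is the ARConnect cross-core
 * interrupt, while SOFTIRQ_IRQ is a core-local software interrupt used for
 * self-IPIs, since ARConnect cannot target the sending core (see
 * mcip_ipi_send() below).
 */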

static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /* ARConnect can only send an IPI to other cores, not to self */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If the receiver already has a pending interrupt, elide sending
         * this one. Linux cross-core calling works fine with concurrent
         * IPIs coalesced into one; see arch/arc/kernel/smp.c:
         * ipi_send_msg_one()
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
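
/*
 * Why eliding is safe (illustrative, going by the ipi_send_msg_one()
 * reference above): the actual message is a bit in a per-cpu word that the
 * sender sets before raising the hardware IPI, so a receiver draining that
 * word under an already-pending IRQ picks up the new message anyway; a
 * second hardware assertion would be redundant.
 */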

static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1,2,4,8... */

        /*
         * In rare cases, multiple concurrent IPIs sent to the same target
         * can be coalesced by MCIP into one asserted IRQ, so @cpu can be
         * "vectored" (multiple bits set) as opposed to the typical single bit
         */
        do {
                c = __ffs(cpu);                         /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
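
/*
 * Worked example of the ack loop (illustrative values): if cores 1 and 2
 * both sent an IPI that got coalesced, READBACK returns 0x6; __ffs(0x6) = 1
 * acks core 1 leaving 0x4, __ffs(0x4) = 2 acks core 2 leaving 0, and the
 * loop terminates.
 */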

static void mcip_probe_n_setup(void)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        sprintf(smp_cpuinfo_buf,
                "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.llm, "LLM "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));

        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;

        if (mp.dbg) {
                __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
                __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
        }
}
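
/*
 * For a hypothetical quad-core HS38 with every block present, the buffer
 * above would read:
 *      Extn [SMP]      : ARConnect (v2): 4 cores with IPI IDU LLM DEBUG GFRC
 * (IS_AVAIL1() expands to the given string when the BCR bit is set, else "")
 */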

struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = mcip_probe_n_setup,
        .init_per_cpu   = mcip_setup_per_cpu,
        .ipi_send       = mcip_ipi_send,
        .ipi_clear      = mcip_ipi_clear,
};
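
/*
 * These hooks are driven by the generic ARC SMP code (arch/arc/kernel/smp.c):
 * roughly, init_early_smp runs once early in boot, init_per_cpu on each CPU
 * as it comes online, and ipi_send/ipi_clear from the IPI send/receive paths.
 */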

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
                         unsigned int distr)
{
        union {
                unsigned int word;
                struct {
                        unsigned int distr:2, pad:2, lvl:1, pad2:27;
                };
        } data;

        data.distr = distr;
        data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
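
/*
 * Illustrative encoding (assuming the usual LSB-first bitfield layout on
 * little-endian ARC): distr occupies bits [1:0] and lvl bit [4], so e.g.
 * distr = 1, lvl = 1 packs to data.word = 0x11, which __mcip_cmd_data()
 * deposits in WDATA before issuing CMD_IDU_SET_MODE.
 */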

static void idu_irq_mask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_unmask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
        unsigned long flags;
        cpumask_t online;
        unsigned int destination_bits;
        unsigned int distribution_mode;

        /* error out if @cpumask contains no online CPU */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        destination_bits = cpumask_bits(&online)[0];
        idu_set_dest(data->hwirq, destination_bits);

        if (ffs(destination_bits) == fls(destination_bits))
                distribution_mode = IDU_M_DISTRI_DEST;
        else
                distribution_mode = IDU_M_DISTRI_RR;

        idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return IRQ_SET_MASK_OK;
}
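
/*
 * Example of the single-vs-multi destination test (illustrative values):
 * destination_bits = 0x4 (CPU 2 only) gives ffs() == fls() == 3, so the IRQ
 * is pinned via IDU_M_DISTRI_DEST; destination_bits = 0x6 (CPUs 1 and 2)
 * gives ffs() = 2 != fls() = 3, so IDU_M_DISTRI_RR round-robins among them.
 */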

static void idu_irq_enable(struct irq_data *data)
{
        /*
         * By default send all common interrupts to all available online CPUs.
         * The affinity of common interrupts in IDU must be set manually since
         * in some cases the kernel will not call irq_set_affinity() itself:
         * 1. When the kernel is not configured with SMP support.
         * 2. When the kernel is configured with SMP support but the upper
         *    interrupt controllers do not support setting the affinity and
         *    thus cannot propagate it to IDU.
         */
        idu_irq_set_affinity(data, cpu_online_mask, false);
        idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
        .irq_enable             = idu_irq_enable,
#ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
#endif
};

static irq_hw_number_t idu_first_hwirq;

static void idu_cascade_isr(struct irq_desc *desc)
{
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
        struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;

        chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
        chained_irq_exit(core_chip, desc);
}
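
/*
 * Example translation (hypothetical numbers): with the first IDU uplink on
 * core intc hwirq 24, idu_first_hwirq = 24, so a firing of core hwirq 26
 * resolves to IDU common IRQ 26 - 24 = 2, whose virq is then looked up in
 * idu_domain and handled under the chained-IRQ protocol.
 */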

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

        return 0;
}

static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
                         const u32 *intspec, unsigned int intsize,
                         irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
        /*
         * Ignore the interrupt distribution mode for common interrupts in
         * IDU, which resides in intspec[1], since setting the affinity from
         * Device Tree values is deprecated on ARC.
         */
        *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_NONE;

        return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
        .xlate  = idu_irq_xlate,
        .map    = idu_irq_map,
};

/*
 * First-level (core intc) IRQ numbering, with "C" common IRQs behind the IDU:
 * [16, 23]:   Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0, the "C" common IRQs (IDU uplinks)
 * [24+C, N]:  Not statically assigned, private-per-core
 */
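
/*
 * For instance, with C = 3 (illustrative): core intc lines 16..23 stay
 * private (timers, WDT, IPI), lines 24..26 are the IDU uplinks feeding the
 * 3 common IRQs, and 27..N remain private-per-core.
 */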

static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        /* Read IDU BCR to confirm nr_irqs */
        int nr_irqs = of_irq_count(intc);
        int i, virq;
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");

        pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */

        for (i = 0; i < nr_irqs; i++) {
                /*
                 * irq_of_parse_and_map() returns the parent uplink IRQs
                 * (towards core intc): 24, 25, ... These were mapped already,
                 * but we redo it here to get the parent virq, so the IDU
                 * cascade handler can be installed as the first-level ISR.
                 */
                virq = irq_of_parse_and_map(intc, i);
                if (!i)
                        idu_first_hwirq = irqd_to_hwirq(irq_get_irq_data(virq));

                irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
321 | IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init); |