/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

struct gic_chip_data {
        void __iomem *dist_base;
        void __iomem **redist_base;
        void __percpu __iomem **rdist;
        struct irq_domain *domain;
        u64 redist_stride;
        u32 redist_regions;
        unsigned int irq_nr;
};

static struct gic_chip_data gic_data __read_mostly;

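/*
 * Each CPU's redistributor is discovered at boot and stashed in a
 * per-cpu pointer. A redistributor spans (at least) two contiguous
 * 64K frames: RD_base, holding the control registers, and SGI_base,
 * holding the SGI/PPI configuration registers.
 */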
#define gic_data_rdist()	(this_cpu_ptr(gic_data.rdist))
#define gic_data_rdist_rd_base()	(*gic_data_rdist())
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

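/*
 * The GICv3 INTID space, as carved up by the helpers below:
 *
 *     0-  15: SGIs (per-CPU, accessed via the redistributor)
 *    16-  31: PPIs (per-CPU, accessed via the redistributor)
 *    32-1019: SPIs (global, accessed via the distributor)
 *  1020-1023: special INTIDs (spurious & friends)
 *      8192+: LPIs (not supported by this driver)
 */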
static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)		/* SPI -> dist_base */
                return gic_data.dist_base;

        if (d->hwirq >= 8192)
                BUG();		/* LPI Detected!!! */

        return NULL;
}

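/*
 * GICD_CTLR.RWP ("Register Write Pending") stays set while the effects
 * of certain register writes are still propagating through the
 * distributor or redistributor; software must poll it back to zero
 * before relying on those effects.
 */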
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;	/* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

/*
 * Low level accessors. The ICC_* encodings come from sys_reg(), so
 * they must go through the mrs_s/msr_s assembler macros rather than
 * plain mrs/msr.
 */
static u64 gic_read_iar(void)
{
        u64 irqstat;

        asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
        return irqstat;
}

static void gic_write_eoir(u64 irq)
{
        asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
        isb();
}

static void gic_write_pmr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void gic_write_ctlr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
}

static void gic_write_grpen1(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
}

static void gic_write_sgi1r(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

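/*
 * ICC_SRE_EL1.SRE selects the system register interface to the CPU
 * interface, as opposed to the legacy memory-mapped GICv2-style view.
 * This driver is useless without it, hence the check below.
 */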
static void gic_enable_sre(void)
{
        u64 val;

        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        val |= ICC_SRE_EL1_SRE;
        asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
        isb();

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        if (!(val & ICC_SRE_EL1_SRE))
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}

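/*
 * Waking a redistributor is a two-step handshake: clear
 * GICR_WAKER.ProcessorSleep, then poll GICR_WAKER.ChildrenAsleep until
 * the redistributor reports that its connection to the CPU is up.
 */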
static void gic_enable_redist(void)
{
        void __iomem *rbase;
        u32 count = 1000000;	/* 1s! */
        u32 val;

        rbase = gic_data_rdist_rd_base();

        /* Wake up this CPU redistributor */
        val = readl_relaxed(rbase + GICR_WAKER);
        val &= ~GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
                count--;
                if (!count) {
                        pr_err_ratelimited("redist didn't wake up...\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
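/*
 * GICD_I[SC]ENABLER & friends are banked 32 interrupts per 32-bit
 * register, one bit per interrupt. For example, hwirq 45 lives in
 * bit 13 (45 % 32) of the second register (45 / 32 == 1), i.e. at
 * base + offset + 4.
 */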
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void __iomem *base;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        gic_configure_irq(irq, type, base, rwp_wait);

        return 0;
}

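/*
 * GICD_IROUTER packs the MPIDR affinity fields as Aff0 in bits [7:0],
 * Aff1 in [15:8], Aff2 in [23:16] and Aff3 in [39:32], which is what
 * the shifts below reconstruct from a logical CPU's MPIDR value.
 */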
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
        u64 aff;

        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

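/*
 * Interrupt entry point: reading ICC_IAR1_EL1 acknowledges the highest
 * priority pending interrupt and returns its INTID; writing that INTID
 * back to ICC_EOIR1_EL1 retires it (with EOImode == 0, priority drop
 * and deactivation happen together). The loop drains all pending
 * interrupts until the spurious INTID (1023) is returned.
 */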
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u64 irqnr;

        do {
                irqnr = gic_read_iar();

                if (likely(irqnr > 15 && irqnr < 1020)) {
                        u64 irq = irq_find_mapping(gic_data.domain, irqnr);
                        if (likely(irq)) {
                                handle_IRQ(irq, regs);
                                continue;
                        }

                        WARN_ONCE(true, "Unexpected SPI received!\n");
                        gic_write_eoir(irqnr);
                }
                if (irqnr < 16) {
                        gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#else
                        WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
                        continue;
                }
        } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}

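/*
 * Walk each redistributor region, matching GICR_TYPER[63:32] (the
 * affinity value this redistributor serves) against our own MPIDR.
 * Without a "redistributor-stride" property, consecutive
 * redistributors are assumed to be laid out back to back: two 64K
 * frames each, or four when GICR_TYPER.VLPIS announces GICv4 VLPI
 * support.
 */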
static int gic_populate_rdist(void)
{
        u64 mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;
        int i;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        for (i = 0; i < gic_data.redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_base[i];
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = readq_relaxed(ptr + GICR_TYPER);
                        if ((typer >> 32) == aff) {
                                gic_data_rdist_rd_base() = ptr;
                                pr_info("CPU%d: found redistributor %llx @%p\n",
                                        smp_processor_id(),
                                        (unsigned long long)mpidr, ptr);
                                return 0;
                        }

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
             smp_processor_id(), (unsigned long long)mpidr);
        return -ENODEV;
}

static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist();

        rbase = gic_data_rdist_sgi_base();

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* Enable system registers */
        gic_enable_sre();

        /* Set priority mask register */
        gic_write_pmr(DEFAULT_PMR_VALUE);

        /* EOI deactivates interrupt too (mode 0) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

        /* ... and let's hit the road... */
        gic_write_grpen1(1);
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init();
        return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};

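/*
 * Build the 16-bit SGI target list for one cluster: starting at
 * *base_cpu, accumulate every CPU in @mask that shares @cluster_id
 * (the MPIDR with Aff0 masked out), setting bit Aff0 for each. On
 * return, *base_cpu points at the last CPU consumed, so the caller's
 * for_each_cpu_mask() walk resumes in the next cluster.
 */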
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   u64 cluster_id)
{
        int cpu = *base_cpu;
        u64 mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                /*
                 * If we ever get a cluster of more than 16 CPUs, just
                 * scream and skip that CPU.
                 */
                if (WARN_ON((mpidr & 0xff) >= 16))
                        goto out;

                tlist |= 1 << (mpidr & 0xf);

                cpu = cpumask_next(cpu, mask);
                if (cpu == nr_cpu_ids)
                        goto out;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != (mpidr & ~0xffUL)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

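/*
 * ICC_SGI1R_EL1 layout, as assembled below: Aff3 in bits [55:48],
 * Aff2 in [39:32], the SGI INTID in [27:24], Aff1 in [23:16] and the
 * target list in [15:0]. For example, SGI 5 sent to CPUs 0 and 2 of
 * cluster Aff1=1 encodes as (5 << 24) | (1 << 16) | 0b0101.
 */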
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
               irq << 24                                 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
               tlist);

        pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        smp_wmb();

        for_each_cpu_mask(cpu, *mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        register_cpu_notifier(&gic_cpu_notifier);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        void __iomem *reg;
        int enabled;
        u64 val;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        writeq_relaxed(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif

static struct irq_chip gic_chip = {
        .name			= "GICv3",
        .irq_mask		= gic_mask_irq,
        .irq_unmask		= gic_unmask_irq,
        .irq_eoi		= gic_eoi_irq,
        .irq_set_type		= gic_set_type,
        .irq_set_affinity	= gic_set_affinity,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_percpu_devid_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_fasteoi_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        irq_set_chip_data(irq, d->host_data);
        return 0;
}

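/*
 * Translate a three-cell device tree interrupt specifier: the first
 * cell selects the interrupt class (0 = SPI, 1 = PPI), the second is
 * the class-relative number (rebased to hwirq 32 or 16 respectively),
 * and the third carries the trigger type flags.
 */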
static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        switch (intspec[0]) {
        case 0:			/* SPI */
                *out_hwirq = intspec[1] + 32;
                break;
        case 1:			/* PPI */
                *out_hwirq = intspec[1] + 16;
                break;
        default:
                return -EINVAL;
        }

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
        return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};

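/*
 * Probe from a device tree node such as this (illustrative only; the
 * addresses and sizes are made up):
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2f000000 0x10000>,	(GICD)
 *		      <0x2f100000 0x200000>;	(GICR region 0)
 *	};
 *
 * Additional "reg" entries describe further redistributor regions,
 * their count given by "#redistributor-regions".
 */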
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        void __iomem **redist_base;
        u64 redist_stride;
        u32 redist_regions;
        u32 reg;
        int gic_irqs;
        int err;
        int i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%s: unable to map gic dist registers\n",
                       node->full_name);
                return -ENXIO;
        }

        reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
                pr_err("%s: no distributor detected, giving up\n",
                       node->full_name);
                err = -ENODEV;
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
                redist_regions = 1;

        redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
        if (!redist_base) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < redist_regions; i++) {
                redist_base[i] = of_iomap(node, 1 + i);
                if (!redist_base[i]) {
                        pr_err("%s: couldn't map region %d\n",
                               node->full_name, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_data.dist_base = dist_base;
        gic_data.redist_base = redist_base;
        gic_data.redist_regions = redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic_data.irq_nr = gic_irqs;

        gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
                                              &gic_data);
        gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        set_handle_irq(gic_handle_irq);

        gic_smp_init();
        gic_dist_init();
        gic_cpu_init();

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdist);
out_unmap_rdist:
        for (i = 0; i < redist_regions; i++)
                if (redist_base[i])
                        iounmap(redist_base[i]);
        kfree(redist_base);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);