]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/irqchip/irq-gic-v3.c
irqchip: GICv3: ITS: DT probing and initialization
[mirror_ubuntu-artful-kernel.git] / drivers / irqchip / irq-gic-v3.c
CommitLineData
021f6537
MZ
1/*
2 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/cpu.h>
3708d52f 19#include <linux/cpu_pm.h>
021f6537
MZ
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/of.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
25#include <linux/percpu.h>
26#include <linux/slab.h>
27
28#include <linux/irqchip/arm-gic-v3.h>
29
30#include <asm/cputype.h>
31#include <asm/exception.h>
32#include <asm/smp_plat.h>
33
34#include "irq-gic-common.h"
35#include "irqchip.h"
36
/*
 * One contiguous block of redistributor pages, as described by one
 * entry of the DT "#redistributor-regions" property.
 */
struct redist_region {
	void __iomem *redist_base;	/* virtual mapping of the region */
	phys_addr_t phys_base;		/* physical base, kept for reporting */
};
/* Driver-global state for the (single) GICv3 instance. */
struct gic_chip_data {
	void __iomem *dist_base;		/* distributor register frame */
	struct redist_region *redist_regions;	/* mapped redistributor regions */
	struct rdists rdists;			/* per-cpu redistributor data */
	struct irq_domain *domain;
	u64 redist_stride;			/* 0 = use architectural stride */
	u32 nr_redist_regions;
	unsigned int irq_nr;			/* number of supported hwirqs */
};

static struct gic_chip_data gic_data __read_mostly;

/* Accessors for this CPU's redistributor frames (RD_base, then SGI_base) */
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
60
/* Hardware interrupt number behind this irq_data. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/* SGIs (0-15) and PPIs (16-31) are handled in the redistributor. */
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}
70
/*
 * Register frame that controls this interrupt: the local
 * redistributor's SGI_base for SGIs/PPIs, the distributor for SPIs.
 * LPIs (hwirq >= 8192) are not supported here and trigger a BUG().
 */
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	if (d->hwirq >= 8192)
		BUG();		/* LPI Detected!!! */

	return NULL;
}
84
85static void gic_do_wait_for_rwp(void __iomem *base)
86{
87 u32 count = 1000000; /* 1s! */
88
89 while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
90 count--;
91 if (!count) {
92 pr_err_ratelimited("RWP timeout, gone fishing\n");
93 return;
94 }
95 cpu_relax();
96 udelay(1);
97 };
98}
99
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change (this CPU's RD frame) */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
111
112/* Low level accessors */
c44e9d77 113static u64 __maybe_unused gic_read_iar(void)
021f6537
MZ
114{
115 u64 irqstat;
116
72c58395 117 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
021f6537
MZ
118 return irqstat;
119}
120
c44e9d77 121static void __maybe_unused gic_write_pmr(u64 val)
021f6537 122{
72c58395 123 asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
021f6537
MZ
124}
125
c44e9d77 126static void __maybe_unused gic_write_ctlr(u64 val)
021f6537 127{
72c58395 128 asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
021f6537
MZ
129 isb();
130}
131
c44e9d77 132static void __maybe_unused gic_write_grpen1(u64 val)
021f6537 133{
72c58395 134 asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
021f6537
MZ
135 isb();
136}
137
c44e9d77 138static void __maybe_unused gic_write_sgi1r(u64 val)
021f6537 139{
72c58395 140 asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
021f6537
MZ
141}
142
/* Enable the system register interface (ICC_*_EL1) for this CPU. */
static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
163
/*
 * Wake up (enable=true) or put to sleep (enable=false) this CPU's
 * redistributor via GICR_WAKER, then poll (bounded, ~1s) until
 * ChildrenAsleep reflects the requested state.
 */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Poll until ChildrenAsleep matches the requested state */
	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
197
/*
 * Routines to disable, enable, EOI and route interrupts
 */

/*
 * Write the bit for gic_irq(d) into the 1-bit-per-interrupt register
 * bank at @offset (ISENABLER/ICENABLER style), using the redistributor
 * SGI frame or the distributor as appropriate, then wait for RWP.
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
218
/* Disable forwarding of this interrupt (write-1-to-clear ICENABLER). */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

/* Enable forwarding of this interrupt (write-1-to-set ISENABLER). */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

/* Signal end-of-interrupt for this interrupt to the CPU interface. */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
233
/*
 * irq_set_type callback: program level/edge trigger for a PPI or SPI.
 * SGIs have a fixed configuration and are rejected.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* Only level-high and edge-rising are supported */
	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	gic_configure_irq(irq, type, base, rwp_wait);

	return 0;
}
259
260static u64 gic_mpidr_to_affinity(u64 mpidr)
261{
262 u64 aff;
263
264 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
265 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
266 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
267 MPIDR_AFFINITY_LEVEL(mpidr, 0));
268
269 return aff;
270}
271
/*
 * Top-level interrupt entry point: ack interrupts from ICC_IAR1_EL1
 * and dispatch until the spurious ID is returned.  PPIs/SPIs go
 * through the irq domain; SGIs are handed to the IPI code.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020)) {
			int err;
			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected SPI received!\n");
				/* No handler ran, so EOI it ourselves */
				gic_write_eoir(irqnr);
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
299
/*
 * One-time distributor setup: disable it, apply the common GIC
 * configuration, re-enable it with affinity routing and Group1, and
 * route every SPI to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
324
/*
 * Walk every redistributor region looking for the redistributor whose
 * GICR_TYPER affinity matches this CPU's MPIDR; record its RD_base and
 * physical address in the per-cpu rdist data.
 * Returns 0 on success, -ENODEV if none matches.
 */
static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		/* Sanity-check the region really contains a v3/v4 RD */
		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
					smp_processor_id(),
					(unsigned long long)mpidr,
					i, &gic_data_rdist()->phys_base);
				return 0;
			}

			/* Step to the next redistributor in this region */
			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
380
/*
 * Program the per-CPU system register interface.  Called both at CPU
 * bringup and from the CPU PM notifier on resume.
 */
static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}
395
/*
 * Per-CPU bringup: locate this CPU's redistributor, wake it up,
 * configure the SGI/PPI frame and the system register interface.
 */
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;		/* no redistributor found: nothing we can do */

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
413
414#ifdef CONFIG_SMP
/*
 * Read the bit for gic_irq(d) from the 1-bit-per-interrupt register
 * bank at @offset; returns 0 or 1.
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
427
/* CPU hotplug callback: bring up the GIC CPU interface on a starting CPU. */
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
444
/*
 * Build the 16-bit SGI target list for the CPUs in @mask that belong to
 * @cluster_id, starting from *base_cpu.  On return *base_cpu is the
 * last CPU consumed, so the caller's loop resumes with the next cluster.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu == nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			/* Different cluster: step back so the caller
			 * revisits this CPU on the next iteration. */
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
477
/* Compose and write one ICC_SGI1R_EL1 value for @cluster_id/@tlist. */
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
	       MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
	       irq << 24 |
	       MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
	       tlist);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
491
/* IPI entry point: send SGI @irq to every CPU in @mask, cluster by cluster. */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	/* Only SGIs 0-15 exist */
	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu_mask(cpu, *mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		/* One register write covers up to 16 CPUs of one cluster */
		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
516
/* Hook up the IPI path and the secondary-CPU bringup notifier. */
static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}
522
/*
 * irq_set_affinity callback: route an SPI to one online CPU from
 * @mask_val via its GICD_IROUTER register.  Per-CPU interrupts
 * (SGI/PPI) cannot be moved.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enabled it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
555#else
556#define gic_set_affinity NULL
557#define gic_smp_init() do { } while(0)
558#endif
559
3708d52f
SH
#ifdef CONFIG_CPU_PM
/*
 * CPU PM notifier: on suspend entry, disable Group1 delivery and put
 * the redistributor to sleep; on exit, wake it and reprogram the
 * system register interface.
 */
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
586
021f6537
MZ
/* irq_chip callbacks shared by all GICv3 PPIs and SPIs. */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
};
595
/*
 * Map a hwirq into the domain: PPIs get the per-cpu-devid handler,
 * SPIs the fasteoi handler.  SGIs are refused (core kernel only).
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
618
/*
 * Translate a 3-cell DT interrupt specifier (<type nr flags>) into a
 * hwirq number and trigger type: SPIs are offset by 32, PPIs by 16.
 */
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch(intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
643
443acc4f
MZ
/* Hierarchical-domain alloc: translate the OF specifier, map each virq. */
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}
662
/* Hierarchical-domain free: undo gic_irq_domain_alloc for each virq. */
static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
674
/* Domain ops: legacy xlate plus hierarchical alloc/free. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
};
680
/*
 * DT probe: map the distributor and all redistributor regions, size
 * the GIC from GICD_TYPER, create the irq domain and bring up the
 * boot CPU interface.  Returns 0 on success or a negative errno; all
 * mappings and allocations are torn down on failure.
 */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	u32 typer;
	u32 reg;
	int gic_irqs;
	int err;
	int i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
			node->full_name);
		return -ENXIO;
	}

	/* Sanity-check that this really is a GICv3/v4 distributor */
	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
			node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;	/* single region by default */

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	/* Redistributor regions are reg entries 1..nr_redist_regions */
	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;	/* 0 = architectural stride */

	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
782
/* Probe hook for "arm,gic-v3" compatible device tree nodes. */
IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);