/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>

union gic_base {
        void __iomem *common_base;
        void __percpu __iomem **percpu_base;
};

struct gic_chip_data {
        union gic_base dist_base;
        union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
        u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
        u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
        u32 __percpu *saved_ppi_enable;
        u32 __percpu *saved_ppi_conf;
#endif
        struct irq_domain *domain;
        unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
        void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * Supported arch specific GIC irq extension.
 * All callbacks default to NULL.
 */
struct irq_chip gic_arch_extn = {
        .irq_eoi = NULL,
        .irq_mask = NULL,
        .irq_unmask = NULL,
        .irq_retrigger = NULL,
        .irq_set_type = NULL,
        .irq_set_wake = NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
        return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
        return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
        return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
        return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
                                         void __iomem *(*f)(union gic_base *))
{
        data->get_base = f;
}
#else
#define gic_data_dist_base(d)   ((d)->dist_base.common_base)
#define gic_data_cpu_base(d)    ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d,f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
        u32 mask = 1 << (gic_irq(d) % 32);

        raw_spin_lock(&irq_controller_lock);
        writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
        if (gic_arch_extn.irq_mask)
                gic_arch_extn.irq_mask(d);
        raw_spin_unlock(&irq_controller_lock);
}

161
162 static void gic_unmask_irq(struct irq_data *d)
163 {
164 u32 mask = 1 << (gic_irq(d) % 32);
165
166 raw_spin_lock(&irq_controller_lock);
167 if (gic_arch_extn.irq_unmask)
168 gic_arch_extn.irq_unmask(d);
169 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
170 raw_spin_unlock(&irq_controller_lock);
171 }
172
static void gic_eoi_irq(struct irq_data *d)
{
        if (gic_arch_extn.irq_eoi) {
                raw_spin_lock(&irq_controller_lock);
                gic_arch_extn.irq_eoi(d);
                raw_spin_unlock(&irq_controller_lock);
        }

        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

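/*
 * Configure the trigger type of a distributor interrupt.  Only level-high
 * and edge-rising are supported, and SGIs cannot be reconfigured; the
 * interrupt is briefly disabled around the GIC_DIST_CONFIG update.
 */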
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
        u32 enablemask = 1 << (gicirq % 32);
        u32 enableoff = (gicirq / 32) * 4;
        u32 confmask = 0x2 << ((gicirq % 16) * 2);
        u32 confoff = (gicirq / 16) * 4;
        bool enabled = false;
        u32 val;

        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        raw_spin_lock(&irq_controller_lock);

        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);

        val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
        if (type == IRQ_TYPE_LEVEL_HIGH)
                val &= ~confmask;
        else if (type == IRQ_TYPE_EDGE_RISING)
                val |= confmask;

        /*
         * As recommended by the spec, disable the interrupt before changing
         * the configuration
         */
        if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
                enabled = true;
        }

        writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

        if (enabled)
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

        raw_spin_unlock(&irq_controller_lock);

        return 0;
}

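/* Delegate software retrigger to the arch-specific extension, if any. */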
static int gic_retrigger(struct irq_data *d)
{
        if (gic_arch_extn.irq_retrigger)
                return gic_arch_extn.irq_retrigger(d);

        return -ENXIO;
}

#ifdef CONFIG_SMP
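/*
 * Route an interrupt to a single online CPU from the requested mask by
 * rewriting its byte in GIC_DIST_TARGET with that CPU's GIC interface mask.
 */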
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int shift = (gic_irq(d) % 4) * 8;
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 val, mask, bit;

        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;

        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;

        raw_spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock(&irq_controller_lock);

        return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
        int ret = -ENXIO;

        if (gic_arch_extn.irq_set_wake)
                ret = gic_arch_extn.irq_set_wake(d, on);

        return ret;
}

#else
#define gic_set_wake    NULL
#endif

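/*
 * Low-level interrupt entry point for the primary GIC: acknowledge pending
 * interrupts from the CPU interface, dispatching SPIs/PPIs through the IRQ
 * domain and SGIs (0-15) as IPIs, until a spurious ID is read.
 */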
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqstat, irqnr;
        struct gic_chip_data *gic = &gic_data[0];
        void __iomem *cpu_base = gic_data_cpu_base(gic);

        do {
                irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
                irqnr = irqstat & ~0x1c00;

                if (likely(irqnr > 15 && irqnr < 1021)) {
                        irqnr = irq_find_mapping(gic->domain, irqnr);
                        handle_IRQ(irqnr, regs);
                        continue;
                }
                if (irqnr < 16) {
                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#endif
                        continue;
                }
                break;
        } while (1);
}

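/*
 * Chained handler used when a secondary GIC is cascaded off an interrupt
 * of another controller: acknowledge the secondary GIC and handle the
 * corresponding Linux interrupt.
 */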
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct gic_chip_data *chip_data = irq_get_handler_data(irq);
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        raw_spin_lock(&irq_controller_lock);
        status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
        raw_spin_unlock(&irq_controller_lock);

        gic_irq = (status & 0x3ff);
        if (gic_irq == 1023)
                goto out;

        cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
        if (unlikely(gic_irq < 32 || gic_irq > 1020))
                do_bad_IRQ(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
        .name = "GIC",
        .irq_mask = gic_mask_irq,
        .irq_unmask = gic_unmask_irq,
        .irq_eoi = gic_eoi_irq,
        .irq_set_type = gic_set_type,
        .irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
        .irq_set_affinity = gic_set_affinity,
#endif
        .irq_set_wake = gic_set_wake,
};

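/* Install the chained handler for a GIC cascaded off the given IRQ. */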
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
        if (gic_nr >= MAX_GIC_NR)
                BUG();
        if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
                BUG();
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

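/*
 * Read the banked GIC_DIST_TARGET registers for the first 32 interrupts to
 * discover which CPU interface bit the GIC has assigned to this CPU.
 */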
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
        void __iomem *base = gic_data_dist_base(gic);
        u32 mask, i;

        for (i = mask = 0; i < 32; i += 4) {
                mask = readl_relaxed(base + GIC_DIST_TARGET + i);
                mask |= mask >> 16;
                mask |= mask >> 8;
                if (mask)
                        break;
        }

        if (!mask)
                pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

        return mask;
}

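/*
 * One-time distributor setup: configure, target and prioritise all global
 * interrupts (SPIs), leave them disabled, then enable the distributor.
 */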
static void __init gic_dist_init(struct gic_chip_data *gic)
{
        unsigned int i;
        u32 cpumask;
        unsigned int gic_irqs = gic->gic_irqs;
        void __iomem *base = gic_data_dist_base(gic);

        writel_relaxed(0, base + GIC_DIST_CTRL);

        /*
         * Set all global interrupts to be level triggered, active low.
         */
        for (i = 32; i < gic_irqs; i += 16)
                writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

        /*
         * Set all global interrupts to this CPU only.
         */
        cpumask = gic_get_cpumask(gic);
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

        /*
         * Set priority on all global interrupts.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

        /*
         * Disable all interrupts.  Leave the PPI and SGIs alone
         * as these enables are banked registers.
         */
        for (i = 32; i < gic_irqs; i += 32)
                writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

        writel_relaxed(1, base + GIC_DIST_CTRL);
}

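/*
 * Per-CPU setup: record this CPU's interface mask, program the banked
 * PPI/SGI enables and priorities, and enable the CPU interface.
 */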
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
        void __iomem *dist_base = gic_data_dist_base(gic);
        void __iomem *base = gic_data_cpu_base(gic);
        unsigned int cpu_mask, cpu = smp_processor_id();
        int i;

        /*
         * Get what the GIC says our CPU mask is.
         */
        BUG_ON(cpu >= NR_GIC_CPU_IF);
        cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;

        /*
         * Clear our mask from the other map entries in case they're
         * still undefined.
         */
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;

        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, ensure all SGI interrupts are enabled.
         */
        writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
        writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

        /*
         * Set priority on PPI and SGI interrupts
         */
        for (i = 0; i < 32; i += 4)
                writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

        writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
        writel_relaxed(1, base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        void __iomem *dist_base;
        int i;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                gic_data[gic_nr].saved_spi_conf[i] =
                        readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                gic_data[gic_nr].saved_spi_target[i] =
                        readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                gic_data[gic_nr].saved_spi_enable[i] =
                        readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        unsigned int i;
        void __iomem *dist_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        writel_relaxed(0, dist_base + GIC_DIST_CTRL);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
                        dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(0xa0a0a0a0,
                        dist_base + GIC_DIST_PRI + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
                        dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
                        dist_base + GIC_DIST_ENABLE_SET + i * 4);

        writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}

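/* Save this CPU's banked PPI/SGI enable and configuration registers. */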
static void gic_cpu_save(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

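/*
 * Restore this CPU's banked PPI/SGI state and re-enable the CPU interface
 * after a power-down.
 */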
static void gic_cpu_restore(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
                writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

        writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
        writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}

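/*
 * CPU PM notifier: save/restore the per-CPU interface state on CPU power
 * transitions and the distributor state on cluster transitions.
 */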
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        int i;

        for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
                /* Skip over unused GICs */
                if (!gic_data[i].get_base)
                        continue;
#endif
                switch (cmd) {
                case CPU_PM_ENTER:
                        gic_cpu_save(i);
                        break;
                case CPU_PM_ENTER_FAILED:
                case CPU_PM_EXIT:
                        gic_cpu_restore(i);
                        break;
                case CPU_CLUSTER_PM_ENTER:
                        gic_dist_save(i);
                        break;
                case CPU_CLUSTER_PM_ENTER_FAILED:
                case CPU_CLUSTER_PM_EXIT:
                        gic_dist_restore(i);
                        break;
                }
        }

        return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
        .notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
        gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_enable);

        gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);

        if (gic == &gic_data[0])
                cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

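/*
 * Set up a newly mapped interrupt: per-CPU handling for SGIs/PPIs
 * (hwirq < 32), fasteoi handling for SPIs.
 */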
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_percpu_devid_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        } else {
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_fasteoi_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        irq_set_chip_data(irq, d->host_data);
        return 0;
}

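/*
 * Translate a three-cell devicetree interrupt specifier (type, number,
 * flags) into a GIC hardware IRQ number and trigger type.
 */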
static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        /* Get the interrupt number and add 16 to skip over SGIs */
        *out_hwirq = intspec[1] + 16;

        /* For SPIs, we need to add 16 more to get the GIC irq ID number */
        if (!intspec[0])
                *out_hwirq += 16;

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
        return 0;
}

const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};

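/*
 * Main GIC initialisation: record the register bases, size the distributor,
 * create the IRQ domain and initialise the distributor, CPU interface and
 * power-management state.
 */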
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
                           u32 percpu_offset, struct device_node *node)
{
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
        int gic_irqs, irq_base, i;

        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
        if (percpu_offset) { /* Frankenstein-GIC without banked registers... */
                unsigned int cpu;

                gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
                gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
                if (WARN_ON(!gic->dist_base.percpu_base ||
                            !gic->cpu_base.percpu_base)) {
                        free_percpu(gic->dist_base.percpu_base);
                        free_percpu(gic->cpu_base.percpu_base);
                        return;
                }

                for_each_possible_cpu(cpu) {
                        unsigned long offset = percpu_offset * cpu_logical_map(cpu);
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }

                gic_set_base_accessor(gic, gic_get_percpu_base);
        } else
#endif
        {                       /* Normal, sane GIC... */
                WARN(percpu_offset,
                     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
                     percpu_offset);
                gic->dist_base.common_base = dist_base;
                gic->cpu_base.common_base = cpu_base;
                gic_set_base_accessor(gic, gic_get_common_base);
        }

        /*
         * Initialize the CPU interface map to all CPUs.
         * It will be refined as each CPU probes its ID.
         */
        for (i = 0; i < NR_GIC_CPU_IF; i++)
                gic_cpu_map[i] = 0xff;

        /*
         * For primary GICs, skip over SGIs.
         * For secondary GICs, skip over PPIs, too.
         */
        if (gic_nr == 0 && (irq_start & 31) > 0) {
                hwirq_base = 16;
                if (irq_start != -1)
                        irq_start = (irq_start & ~31) + 16;
        } else {
                hwirq_base = 32;
        }

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
        gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic->gic_irqs = gic_irqs;

        gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
        irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
        if (IS_ERR_VALUE(irq_base)) {
                WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
                     irq_start);
                irq_base = irq_start;
        }
        gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                            hwirq_base, &gic_irq_domain_ops, gic);
        if (WARN_ON(!gic->domain))
                return;

        gic_chip.flags |= gic_arch_extn.flags;
        gic_dist_init(gic);
        gic_cpu_init(gic);
        gic_pm_init(gic);
}

void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic_cpu_init(&gic_data[gic_nr]);
}

#ifdef CONFIG_SMP
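/* Send a software-generated interrupt (IPI) to the CPUs in @mask. */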
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;
        unsigned long map = 0;

        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
                map |= gic_cpu_map[cpu];

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb();

        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif

#ifdef CONFIG_OF
static int gic_cnt __initdata = 0;

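/* Devicetree probe: map the distributor and CPU interface and initialise. */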
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *cpu_base;
        void __iomem *dist_base;
        u32 percpu_offset;
        int irq;

        if (WARN_ON(!node))
                return -ENODEV;

        dist_base = of_iomap(node, 0);
        WARN(!dist_base, "unable to map gic dist registers\n");

        cpu_base = of_iomap(node, 1);
        WARN(!cpu_base, "unable to map gic cpu registers\n");

        if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
                percpu_offset = 0;

        gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

        if (parent) {
                irq = irq_of_parse_and_map(node, 0);
                gic_cascade_irq(gic_cnt, irq);
        }
        gic_cnt++;
        return 0;
}
#endif