/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/hardware/cache-l2x0.h>
/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
/*
 * Per-IRQ source control register.  The macro argument is
 * parenthesized so that expression arguments (e.g. hwirq + 1)
 * expand correctly.
 */
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + (irq) * 4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

/* hwirqs below this bound are handled as per-CPU by the map() path */
#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

/* number of IPI doorbells serviced by the IPI handler */
#define ACTIVE_DOORBELLS			(8)
/* Serializes read-modify-write of the per-IRQ source control registers. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/* MMIO base of the per-CPU registers (mask, intack, doorbells). */
static void __iomem *per_cpu_int_base;
/* MMIO base of the main (global) interrupt controller registers. */
static void __iomem *main_int_base;
/* Linear irq_domain mapping MPIC hwirqs to Linux virqs. */
static struct irq_domain *armada_370_xp_mpic_domain;
/*
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
60 static void armada_370_xp_irq_mask(struct irq_data
*d
)
63 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
65 if (hwirq
> ARMADA_370_XP_MAX_PER_CPU_IRQS
)
66 writel(hwirq
, main_int_base
+
67 ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS
);
69 writel(hwirq
, per_cpu_int_base
+
70 ARMADA_370_XP_INT_SET_MASK_OFFS
);
72 writel(irqd_to_hwirq(d
),
73 per_cpu_int_base
+ ARMADA_370_XP_INT_SET_MASK_OFFS
);
77 static void armada_370_xp_irq_unmask(struct irq_data
*d
)
80 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
82 if (hwirq
> ARMADA_370_XP_MAX_PER_CPU_IRQS
)
83 writel(hwirq
, main_int_base
+
84 ARMADA_370_XP_INT_SET_ENABLE_OFFS
);
86 writel(hwirq
, per_cpu_int_base
+
87 ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
89 writel(irqd_to_hwirq(d
),
90 per_cpu_int_base
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
#ifdef CONFIG_SMP
/*
 * Route an interrupt to a single CPU.
 *
 * Builds a physical-CPU bitmask from the requested logical mask and
 * rewrites the interrupt's source control register under
 * irq_controller_lock (read-modify-write of the routing bits).
 *
 * Returns 0 on success, -EINVAL if more than one CPU was requested.
 */
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	/* Convert the logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

	/* Clear the routing bits of all online CPUs, then set the target. */
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif
133 static struct irq_chip armada_370_xp_irq_chip
= {
134 .name
= "armada_370_xp_irq",
135 .irq_mask
= armada_370_xp_irq_mask
,
136 .irq_mask_ack
= armada_370_xp_irq_mask
,
137 .irq_unmask
= armada_370_xp_irq_unmask
,
139 .irq_set_affinity
= armada_xp_set_affinity
,
143 static int armada_370_xp_mpic_irq_map(struct irq_domain
*h
,
144 unsigned int virq
, irq_hw_number_t hw
)
146 armada_370_xp_irq_mask(irq_get_irq_data(virq
));
147 writel(hw
, main_int_base
+ ARMADA_370_XP_INT_SET_ENABLE_OFFS
);
148 irq_set_status_flags(virq
, IRQ_LEVEL
);
150 if (hw
< ARMADA_370_XP_MAX_PER_CPU_IRQS
) {
151 irq_set_percpu_devid(virq
);
152 irq_set_chip_and_handler(virq
, &armada_370_xp_irq_chip
,
153 handle_percpu_devid_irq
);
156 irq_set_chip_and_handler(virq
, &armada_370_xp_irq_chip
,
159 set_irq_flags(virq
, IRQF_VALID
| IRQF_PROBE
);
165 void armada_mpic_send_doorbell(const struct cpumask
*mask
, unsigned int irq
)
168 unsigned long map
= 0;
170 /* Convert our logical CPU mask into a physical one. */
171 for_each_cpu(cpu
, mask
)
172 map
|= 1 << cpu_logical_map(cpu
);
175 * Ensure that stores to Normal memory are visible to the
176 * other CPUs before issuing the IPI.
181 writel((map
<< 8) | irq
, main_int_base
+
182 ARMADA_370_XP_SW_TRIG_INT_OFFS
);
185 void armada_xp_mpic_smp_cpu_init(void)
187 /* Clear pending IPIs */
188 writel(0, per_cpu_int_base
+ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
);
190 /* Enable first 8 IPIs */
191 writel((1 << ACTIVE_DOORBELLS
) - 1, per_cpu_int_base
+
192 ARMADA_370_XP_IN_DRBEL_MSK_OFFS
);
194 /* Unmask IPI interrupt */
195 writel(0, per_cpu_int_base
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS
);
197 #endif /* CONFIG_SMP */
199 static struct irq_domain_ops armada_370_xp_mpic_irq_ops
= {
200 .map
= armada_370_xp_mpic_irq_map
,
201 .xlate
= irq_domain_xlate_onecell
,
204 static int __init
armada_370_xp_mpic_of_init(struct device_node
*node
,
205 struct device_node
*parent
)
209 main_int_base
= of_iomap(node
, 0);
210 per_cpu_int_base
= of_iomap(node
, 1);
212 BUG_ON(!main_int_base
);
213 BUG_ON(!per_cpu_int_base
);
215 control
= readl(main_int_base
+ ARMADA_370_XP_INT_CONTROL
);
217 armada_370_xp_mpic_domain
=
218 irq_domain_add_linear(node
, (control
>> 2) & 0x3ff,
219 &armada_370_xp_mpic_irq_ops
, NULL
);
221 if (!armada_370_xp_mpic_domain
)
222 panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
224 irq_set_default_host(armada_370_xp_mpic_domain
);
227 armada_xp_mpic_smp_cpu_init();
230 * Set the default affinity from all CPUs to the boot cpu.
231 * This is required since the MPIC doesn't limit several CPUs
232 * from acknowledging the same interrupt.
234 cpumask_clear(irq_default_affinity
);
235 cpumask_set_cpu(smp_processor_id(), irq_default_affinity
);
242 asmlinkage
void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs
248 irqstat
= readl_relaxed(per_cpu_int_base
+
249 ARMADA_370_XP_CPU_INTACK_OFFS
);
250 irqnr
= irqstat
& 0x3FF;
256 irqnr
= irq_find_mapping(armada_370_xp_mpic_domain
,
258 handle_IRQ(irqnr
, regs
);
266 ipimask
= readl_relaxed(per_cpu_int_base
+
267 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
)
270 writel(0x0, per_cpu_int_base
+
271 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS
);
273 /* Handle all pending doorbells */
274 for (ipinr
= 0; ipinr
< ACTIVE_DOORBELLS
; ipinr
++) {
275 if (ipimask
& (0x1 << ipinr
))
276 handle_IPI(ipinr
, regs
);
285 static const struct of_device_id mpic_of_match
[] __initconst
= {
286 {.compatible
= "marvell,mpic", .data
= armada_370_xp_mpic_of_init
},
290 void __init
armada_370_xp_init_irq(void)
292 of_irq_init(mpic_of_match
);
293 #ifdef CONFIG_CACHE_L2X0
294 l2x0_of_init(0, ~0UL);