/*
 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
bacdf480 VG |
15 | /* |
16 | * Late Interrupt system init called from start_kernel for Boot CPU only | |
17 | * | |
18 | * Since slab must already be initialized, platforms can start doing any | |
19 | * needed request_irq( )s | |
20 | */ | |
21 | void __init init_IRQ(void) | |
22 | { | |
4c82f286 VG |
23 | /* |
24 | * process the entire interrupt tree in one go | |
25 | * Any external intc will be setup provided DT chains them | |
26 | * properly | |
27 | */ | |
c93d8b8c VG |
28 | irqchip_init(); |
29 | ||
41195d23 | 30 | #ifdef CONFIG_SMP |
286130eb | 31 | /* a SMP H/w block could do IPI IRQ request here */ |
b474a023 NC |
32 | if (plat_smp_ops.init_per_cpu) |
33 | plat_smp_ops.init_per_cpu(smp_processor_id()); | |
286130eb | 34 | |
575a9d4e VG |
35 | if (machine_desc->init_per_cpu) |
36 | machine_desc->init_per_cpu(smp_processor_id()); | |
41195d23 | 37 | #endif |
bacdf480 VG |
38 | } |
39 | ||
/*
 * "C" Entry point for any ARC ISR, called from low level vector handler
 * @irq is the vector number read from ICAUSE reg of on-chip intc
 */
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	/* Publish the interrupted register file for get_irq_regs() users */
	old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();

	set_irq_regs(old_regs);
}
53 | ||
c512c6ba VG |
54 | /* |
55 | * API called for requesting percpu interrupts - called by each CPU | |
56 | * - For boot CPU, actually request the IRQ with genirq core + enables | |
57 | * - For subsequent callers only enable called locally | |
58 | * | |
59 | * Relies on being called by boot cpu first (i.e. request called ahead) of | |
60 | * any enable as expected by genirq. Hence Suitable only for TIMER, IPI | |
61 | * which are guaranteed to be setup on boot core first. | |
62 | * Late probed peripherals such as perf can't use this as there no guarantee | |
63 | * of being called on boot CPU first. | |
64 | */ | |
65 | ||
2b75c0f9 VG |
66 | void arc_request_percpu_irq(int irq, int cpu, |
67 | irqreturn_t (*isr)(int irq, void *dev), | |
68 | const char *irq_nm, | |
69 | void *percpu_dev) | |
70 | { | |
71 | /* Boot cpu calls request, all call enable */ | |
72 | if (!cpu) { | |
73 | int rc; | |
74 | ||
5bf704c2 | 75 | #ifdef CONFIG_ISA_ARCOMPACT |
2b75c0f9 | 76 | /* |
5bf704c2 VG |
77 | * A subsequent request_percpu_irq() fails if percpu_devid is |
78 | * not set. That in turns sets NOAUTOEN, meaning each core needs | |
79 | * to call enable_percpu_irq() | |
80 | * | |
81 | * For ARCv2, this is done in irq map function since we know | |
82 | * which irqs are strictly per cpu | |
2b75c0f9 VG |
83 | */ |
84 | irq_set_percpu_devid(irq); | |
5bf704c2 | 85 | #endif |
2b75c0f9 VG |
86 | |
87 | rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); | |
88 | if (rc) | |
89 | panic("Percpu IRQ request failed for %d\n", irq); | |
90 | } | |
91 | ||
92 | enable_percpu_irq(irq, 0); | |
93 | } |