// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sh/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector':
 * each architecture has to answer this itself, it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch-specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

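/*
 * Layout note: tinfo sits at the lowest addresses of each context and
 * the stack grows downward from the top of the THREAD_SIZE region, so
 * an IRQ context mirrors the layout of an ordinary task stack.
 */
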
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

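/*
 * Static backing storage for the per-CPU IRQ stacks; __page_aligned_bss
 * keeps it page-aligned in .bss instead of allocating it at boot.
 */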
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

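		/*
		 * SH calling convention: r4 carries the first argument
		 * and r15 is the stack pointer.  jsr executes its delay
		 * slot before branching, so "mov %2, r15" switches to
		 * the IRQ stack atomically with the call; callee-saved
		 * r8 carries the old stack pointer across
		 * generic_handle_irq().
		 */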
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

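	/*
	 * As in handle_one_irq(): the jsr delay slot installs the new
	 * stack pointer, and callee-saved r9 preserves the old r15
	 * across the call to __do_softirq().
	 */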
	__asm__ __volatile__ (
		"mov	r15, r9		\n"
		"jsr	@%0		\n"
		/* switch to the softirq stack */
		" mov	%1, r15		\n"
		/* restore the thread stack */
		"mov	r9, r15		\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

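/*
 * Common interrupt entry, reached from the low-level assembly handlers:
 * translate the hardware vector into a Linux IRQ number and handle it,
 * on the separate IRQ stack when CONFIG_IRQSTACKS is enabled.
 */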
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

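/*
 * Boot-time interrupt setup: the platform controllers come up first,
 * then any machine-vector hook, then the INTC tables are finalized,
 * and finally the boot CPU's IRQ stacks are initialised.
 */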
void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline. Migrate IRQs off this CPU. If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irq_data_get_node(data) == cpu) {
			struct cpumask *mask = irq_data_get_affinity_mask(data);
			unsigned int newcpu = cpumask_any_and(mask,
							      cpu_online_mask);
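			/*
			 * cpumask_any_and() returns a CPU number >=
			 * nr_cpu_ids when the intersection is empty, i.e.
			 * no online CPU remains in the affinity mask.
			 */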
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				cpumask_setall(mask);
			}
			irq_set_affinity(irq, mask);
		}
	}
}
#endif