// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something of a weak CPU binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

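/*
 * Illustrative sketch (not part of the original file): with the counting
 * scheme above, a plain BH-disabled section and actual softirq execution
 * are distinguishable from preempt_count() alone. Assuming the usual
 * SOFTIRQ_OFFSET of (1 << SOFTIRQ_SHIFT):
 *
 *	local_bh_disable();	// softirq_count() += SOFTIRQ_DISABLE_OFFSET
 *	...			// in_softirq() is true, in_serving_softirq() is not
 *	local_bh_enable();	// softirq_count() -= SOFTIRQ_DISABLE_OFFSET
 *
 * whereas __do_softirq() below adds only SOFTIRQ_OFFSET, which is the bit
 * that in_serving_softirq() tests for.
 */
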
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * This is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

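/*
 * Illustrative sketch (not part of the original file): the usual entry
 * points to the helpers above are the local_bh_disable() and
 * local_bh_enable() wrappers from <linux/bottom_half.h>, e.g. in a driver
 * protecting data shared with BH context:
 *
 *	local_bh_disable();	// -> __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET)
 *	... touch data shared with a tasklet or softirq handler ...
 *	local_bh_enable();	// -> __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET)
 *
 * Note how __local_bh_enable_ip() may run pending softirqs directly via
 * do_softirq() when it re-enables BHs from task context.
 */
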
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can be potentially deep already. So run softirqs on their
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled here, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

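/*
 * Worked example (not part of the original file): the ffs() loop above
 * walks set bits from lowest to highest. Suppose pending == 0x202, i.e.
 * TIMER (bit 1) and RCU (bit 9) are raised:
 *
 *	ffs(0x202) == 2  -> h += 1, vec_nr == 1 (TIMER), run it, h++
 *	pending >>= 2    -> pending == 0x80
 *	ffs(0x80)  == 8  -> h += 7, vec_nr == 9 (RCU), run it
 *
 * Lower-numbered vectors therefore get implicit priority within one pass.
 */
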
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

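/*
 * Illustrative sketch (not part of the original file): a typical arch
 * interrupt entry path brackets the handler with the pair above/below,
 * roughly:
 *
 *	irq_enter();			// hardirq context + RCU watching
 *	generic_handle_irq(irq);	// run the hardirq handler
 *	irq_exit();			// may invoke_softirq() on the way out
 *
 * so softirq processing normally happens at the tail of hardirq exit.
 */
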
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

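/*
 * Illustrative sketch (not part of the original file): a subsystem
 * registers its handler once at boot and raises the vector later;
 * MY_SOFTIRQ and my_action are hypothetical:
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		... runs with BHs disabled on the raising CPU ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);	// boot-time registration
 *	raise_softirq(MY_SOFTIRQ);		// later, often from hardirq context
 *
 * The real users are the fixed vectors listed in softirq_to_name[] above;
 * new softirq vectors are generally discouraged in favour of tasklets or
 * threaded interrupts.
 */
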
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

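/*
 * Illustrative sketch (not part of the original file): with the
 * callback-based API the handler receives the tasklet itself, so a driver
 * can recover its containing structure with from_tasklet() (a
 * container_of() wrapper from <linux/interrupt.h>); my_dev and its members
 * are hypothetical:
 *
 *	static void my_dev_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		... process dev ...
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_dev_tasklet_fn);
 *	tasklet_schedule(&dev->tasklet);
 */
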
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

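/*
 * Descriptive note (not part of the original file): the smpboot
 * infrastructure behind this struct spawns one "ksoftirqd/N" kthread per
 * CPU; each thread sleeps until ksoftirqd_should_run() reports pending
 * softirqs, then invokes run_ksoftirqd(), which drains them via
 * __do_softirq() above.
 */
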
static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}