// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something like a weak CPU binding,
     though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
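
/*
 * Illustrative sketch of the two offsets (assuming the standard
 * SOFTIRQ_OFFSET/SOFTIRQ_DISABLE_OFFSET definitions from preempt.h):
 *
 *	local_bh_disable();	// preempt_count += SOFTIRQ_DISABLE_OFFSET
 *	...			// in_softirq() is true, in_serving_softirq() is not
 *	local_bh_enable();	// preempt_count -= SOFTIRQ_DISABLE_OFFSET
 *
 * whereas __do_softirq() itself adds only SOFTIRQ_OFFSET, so code running
 * from a softirq handler sees in_serving_softirq() as true as well.
 */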

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * This is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending. And do it on its own stack,
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
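
/*
 * Usage sketch (illustrative only; my_percpu_list and "item" are
 * hypothetical): local_bh_disable()/local_bh_enable() protect per-CPU
 * data that is also touched from softirq context on this CPU:
 *
 *	local_bh_disable();		// softirqs cannot run here
 *	list_add(&item->node, this_cpu_ptr(&my_percpu_list));
 *	local_bh_enable();		// may run softirqs that became pending
 */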

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
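		/*
		 * Note (assumption, not from this file): force_irqthreads is
		 * enabled by the "threadirqs" boot option under
		 * CONFIG_IRQ_FORCED_THREADING; in that mode softirqs are
		 * always deferred to ksoftirqd.
		 */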
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
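
/*
 * Worked example (illustrative): with HZ=1000, MAX_SOFTIRQ_TIME evaluates
 * to 2 jiffies; with HZ=250 it rounds up to a single jiffy, since
 * msecs_to_jiffies() rounds up.
 */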

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;
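
	/*
	 * Illustrative walk of the scan below (a sketch, not part of the
	 * upstream source): if pending == 0b1010 (TIMER_SOFTIRQ and
	 * NET_RX_SOFTIRQ), ffs() returns 2, so h advances by 1 to the
	 * TIMER vector; after h++ and pending >>= 2, the remaining bit
	 * makes ffs() return 2 again, advancing h past NET_TX to NET_RX.
	 */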

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
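
/*
 * Illustrative arch-level flow (a sketch under assumptions, not real arch
 * code): a low-level interrupt handler typically brackets the hardirq work
 * with irq_enter()/irq_exit(), and irq_exit() is where pending softirqs
 * get a chance to run:
 *
 *	void arch_handle_irq(struct pt_regs *regs)	// hypothetical name
 *	{
 *		irq_enter();
 *		generic_handle_irq(irq);	// run the hardirq handler
 *		irq_exit();			// may invoke softirqs on the way out
 *	}
 */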

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
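
/*
 * Usage sketch (illustrative only; "MY_SOFTIRQ" and my_softirq_action()
 * are hypothetical, and new softirq numbers are not open for general use):
 * a subsystem registers its handler once at init time and then raises the
 * softirq, typically from interrupt context:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// process per-CPU work queued for this softirq
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// at init
 *	raise_softirq(MY_SOFTIRQ);			// mark pending on this CPU
 */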

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}
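
		/*
		 * The tasklet was either disabled (t->count non-zero) or is
		 * already running on another CPU (trylock failed): requeue it
		 * and re-raise the softirq so it is retried later.
		 */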
		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
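
/*
 * Usage sketch (illustrative; "my_dev" and my_tasklet_fn() are
 * hypothetical): a driver typically schedules a tasklet from its hardirq
 * handler to defer the bulk of the work:
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		// process completed work with interrupts enabled
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);	// once, at probe time
 *	tasklet_schedule(&dev->tasklet);		// from the irq handler
 */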

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
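
/*
 * Note (illustrative guidance, not from this file): tasklet_kill() only
 * waits out the current activation; callers must first stop whatever
 * re-schedules the tasklet (e.g. disable the device interrupt), or the
 * tasklet may be scheduled again right after the kill.
 */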

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the current stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}