/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}
/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (pending & SOFTIRQ_NOW_MASK)
                return false;

        return tsk && (tsk->state == TASK_RUNNING);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
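/*
 * Illustrative sketch, not part of this file: because softirq handlers add
 * exactly SOFTIRQ_OFFSET while local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET,
 * the two states can be told apart from preempt_count alone.  The helper
 * below is hypothetical; in_softirq() and in_serving_softirq() are the real
 * macros from <linux/preempt.h>.
 */
static void __maybe_unused example_assert_softirq_context(void)
{
        /* True only while __do_softirq() is running a handler. */
        WARN_ON_ONCE(!in_serving_softirq());

        /*
         * Also true in that case, but additionally true whenever the
         * caller merely sits inside a local_bh_disable() section.
         */
        WARN_ON_ONCE(!in_softirq());
}
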
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (preempt_count() == cnt)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);

        __preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as the current task context is borrowed for the
         * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
         * again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}
asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();

        local_irq_restore(flags);
}
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}
static inline void invoke_softirq(void)
{
        if (ksoftirqd_running(local_softirq_pending()))
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent from any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_irq())
                        tick_nohz_irq_exit();
        }
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
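
/*
 * Illustrative sketch, not part of this file: how built-in kernel code
 * typically wires up one of the NR_SOFTIRQS entries.  example_softirq_handler
 * and EXAMPLE_SOFTIRQ are hypothetical names; real users pass one of the
 * enum values from <linux/interrupt.h> (e.g. TASKLET_SOFTIRQ, NET_RX_SOFTIRQ).
 *
 *	static void example_softirq_handler(struct softirq_action *h)
 *	{
 *		// runs in softirq context, interrupts enabled
 *	}
 *
 *	// boot-time setup, typically from an __init function:
 *	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_handler);
 *
 *	// later, usually from a hardirq handler:
 *	raise_softirq(EXAMPLE_SOFTIRQ);
 */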
/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
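
/*
 * For context (an illustrative sketch of the inline wrapper in
 * <linux/interrupt.h>, reproduced here only as an aid to reading):
 * callers go through tasklet_schedule(), which filters out tasklets
 * that are already scheduled, so __tasklet_schedule() above never sees
 * the same tasklet queued twice.
 *
 *	static inline void tasklet_schedule(struct tasklet_struct *t)
 *	{
 *		if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 *			__tasklet_schedule(t);
 *	}
 */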
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}
void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
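
/*
 * Illustrative sketch, not part of this file: the usual driver-side pattern
 * for the tasklet API defined here.  my_dev, my_tasklet_fn and the field
 * names are hypothetical; tasklet_schedule() is the inline wrapper from
 * <linux/interrupt.h>.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work, runs in softirq context
 *	}
 *
 *	// setup:
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *
 *	// from the device's hardirq handler:
 *	tasklet_schedule(&dev->tasklet);
 *
 *	// teardown (must not be called from interrupt context):
 *	tasklet_kill(&dev->tasklet);
 */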
void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
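
/*
 * Illustrative sketch, not part of this file: typical usage of the
 * tasklet_hrtimer combo initialized above.  my_timer_cb and my_ttimer are
 * hypothetical names; tasklet_hrtimer_start() is the inline helper from
 * <linux/interrupt.h>.
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *timer)
 *	{
 *		// runs from HI_SOFTIRQ (tasklet) context, not hardirq
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */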
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on the inline stack, as we are
                 * not deep in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}
static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}