/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
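/*
 * Illustration (a sketch added for clarity, assuming the standard helpers in
 * <linux/preempt.h>): handler execution adds the odd SOFTIRQ_OFFSET while
 * local_bh_disable() adds the even SOFTIRQ_DISABLE_OFFSET, so the two states
 * can be told apart from the same counter field:
 *
 *	in_softirq()		// softirq_count() != 0: bh disabled OR handling softirq
 *	in_serving_softirq()	// softirq_count() & SOFTIRQ_OFFSET: actually running a handler
 */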
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
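/*
 * Usage sketch (added for illustration, not part of the original file): the
 * usual path into the helpers above is the local_bh_disable()/local_bh_enable()
 * pair. The per-CPU counter my_stats and the function my_update_stats() are
 * hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stats);
 *
 *	static void my_update_stats(void)
 *	{
 *		local_bh_disable();		// adds SOFTIRQ_DISABLE_OFFSET
 *		__this_cpu_inc(my_stats);	// safe against softirq handlers on this CPU
 *		local_bh_enable();		// may run pending softirqs via do_softirq()
 *	}
 */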
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
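/*
 * Usage sketch (added for illustration, not part of the original file): a
 * softirq is wired up once at boot with open_softirq() and later raised,
 * typically from hard-irq context; the handler then runs from __do_softirq().
 * The handler name my_softirq_handler and the reuse of NET_TX_SOFTIRQ are
 * hypothetical placeholders.
 *
 *	static void my_softirq_handler(struct softirq_action *h)
 *	{
 *		// runs in softirq context on the CPU that raised it
 *	}
 *
 *	open_softirq(NET_TX_SOFTIRQ, my_softirq_handler);	// at init time
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);				// marks it pending
 */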
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
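/*
 * Usage sketch (added for illustration, not part of the original file): a
 * tasklet_hrtimer lets an hrtimer callback run in softirq context rather than
 * hard-irq context. my_timer_fn and my_th are hypothetical names.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		// runs from HI_SOFTIRQ via the trampolines above
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static struct tasklet_hrtimer my_th;
 *
 *	tasklet_hrtimer_init(&my_th, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */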
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are not
		 * deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}