/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something of a weak cpu binding,
     though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

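/*
 * For example, assuming the in_softirq() and in_serving_softirq() helpers
 * from <linux/preempt.h> (a sketch, not exercised by this file):
 *
 *	local_bh_disable();
 *	 - softirq_count() == SOFTIRQ_DISABLE_OFFSET
 *	 - in_softirq() is true, in_serving_softirq() is false
 *	local_bh_enable();
 *
 * whereas inside a softirq handler, where __do_softirq() has added
 * SOFTIRQ_OFFSET, both in_softirq() and in_serving_softirq() are true.
 */
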
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

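/*
 * Typical callers go through the local_bh_disable()/local_bh_enable()
 * wrappers declared in <linux/bottom_half.h>, which pass
 * SOFTIRQ_DISABLE_OFFSET into the *_ip variants above. A minimal sketch,
 * with example_data as a hypothetical per-cpu structure:
 *
 *	local_bh_disable();
 *	// Softirqs cannot run on this CPU here, so data shared with a
 *	// softirq handler can be touched without further locking.
 *	this_cpu_ptr(&example_data)->counter++;
 *	local_bh_enable();	// may run pending softirqs via do_softirq()
 */
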
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

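/*
 * Softirq vectors come from the fixed enumeration in <linux/interrupt.h>
 * and are registered once during boot. Roughly, the networking core does:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *
 * and later marks the vector pending with raise_softirq(NET_TX_SOFTIRQ),
 * or raise_softirq_irqoff() when interrupts are already disabled.
 */
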
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

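/*
 * A minimal usage sketch (example_tasklet_func and example_dev are
 * hypothetical driver-side names, not defined in this file):
 *
 *	static void example_tasklet_func(unsigned long data)
 *	{
 *		struct example_dev *dev = (struct example_dev *)data;
 *		// runs in softirq context on the scheduling CPU
 *	}
 *
 *	tasklet_init(&dev->tasklet, example_tasklet_func, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// typically from the irq handler
 *	...
 *	tasklet_kill(&dev->tasklet);		// before freeing dev
 */
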
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

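/*
 * Usage sketch, assuming the tasklet_hrtimer_start() helper from
 * <linux/interrupt.h> (example_timer_cb is a hypothetical callback):
 *
 *	static enum hrtimer_restart example_timer_cb(struct hrtimer *t)
 *	{
 *		// invoked from HI_SOFTIRQ context, not hard interrupt context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&ttimer, example_timer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
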
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are not
		 * deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}