/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

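/*
 * Illustrative sketch (an addition, not part of the original file): with the
 * accounting above, in_softirq() is non-zero both while a softirq handler is
 * running and inside a local_bh_disable() section, whereas
 * in_serving_softirq() tests the SOFTIRQ_OFFSET bit only and is therefore
 * true only while a handler is actually executing. The helper below is
 * hypothetical and exists purely to demonstrate that distinction.
 */
static inline void softirq_count_demo(void)
{
        local_bh_disable();
        WARN_ON_ONCE(!in_softirq());            /* counted via SOFTIRQ_DISABLE_OFFSET */
        WARN_ON_ONCE(in_serving_softirq());     /* no softirq handler is running here */
        local_bh_enable();
}
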
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run the softirqs if any are pending, and do it on their
                 * own stack, as we may already be deep in a task call
                 * stack here.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

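/*
 * Usage sketch (hypothetical, not part of the original file): data shared
 * between process context and a softirq or tasklet handler on the local CPU
 * can be protected by bracketing the process-context access with
 * local_bh_disable()/local_bh_enable(). On SMP a spinlock (e.g. spin_lock_bh())
 * is still needed against other CPUs; this only shows the local serialization.
 * The names below are made up for illustration.
 */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update_from_task(void)
{
        local_bh_disable();             /* keep the local softirq handlers out */
        __this_cpu_inc(demo_counter);
        local_bh_enable();              /* may run any now-pending softirqs */
}
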
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC, as the current task context is borrowed for
         * the softirq. A softirq handler such as network RX might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running())
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}

static inline void invoke_softirq(void)
{
        if (ksoftirqd_running())
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which
                 * may already be quite deep. So run the softirqs on their
                 * own stack to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

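/*
 * Usage sketch (hypothetical, not part of the original file): softirq vectors
 * are a fixed, compile-time set (see the enum in <linux/interrupt.h>), so this
 * assumes a made-up MY_DEMO_SOFTIRQ entry had been added there. A subsystem
 * would then register its handler once at boot and raise the softirq later,
 * typically from hardirq context.
 */
static void my_demo_softirq_action(struct softirq_action *h)
{
        /* Runs with hardirqs enabled, in bottom-half context. */
}

static int __init my_demo_softirq_setup(void)
{
        open_softirq(MY_DEMO_SOFTIRQ, my_demo_softirq_action);
        return 0;
}

/* Later, e.g. from an interrupt handler: raise_softirq(MY_DEMO_SOFTIRQ); */
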
/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

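/*
 * Usage sketch (hypothetical, not part of the original file): typical
 * driver-side use of the tasklet API declared in <linux/interrupt.h>. The
 * callback runs from tasklet_action() above, i.e. in softirq context, so it
 * must not sleep. The "my_demo_*" names are made up for illustration.
 */
struct my_demo_dev {
        struct tasklet_struct tasklet;
};

static void my_demo_tasklet_fn(unsigned long data)
{
        struct my_demo_dev *dev = (struct my_demo_dev *)data;

        /* Deferred, non-sleeping work for @dev goes here. */
}

static void my_demo_setup(struct my_demo_dev *dev)
{
        tasklet_init(&dev->tasklet, my_demo_tasklet_fn, (unsigned long)dev);
}

/*
 * From the interrupt handler:  tasklet_schedule(&dev->tasklet);
 * On teardown:                 tasklet_kill(&dev->tasklet);
 */
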
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

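/*
 * Usage sketch (hypothetical, not part of the original file): a user embeds a
 * struct tasklet_hrtimer, initializes it with tasklet_hrtimer_init() and arms
 * it with tasklet_hrtimer_start() from <linux/interrupt.h>; the callback then
 * runs from HI_SOFTIRQ context rather than hard interrupt context. The
 * "demo_*" names are made up for illustration.
 */
static struct tasklet_hrtimer demo_ttimer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        /* Invoked via the trampolines above, in softirq context. */
        return HRTIMER_NORESTART;
}

static void demo_arm_timer(void)
{
        tasklet_hrtimer_init(&demo_ttimer, demo_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&demo_ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
}
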
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirqs inline on the current stack,
                 * as we are not deep in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets       NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}