/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables; all data are CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a weak form of CPU binding. Whether this
     actually results in better locality is still unclear.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here, to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
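
/*
 * Illustrative sketch (not part of the original file): with this scheme a
 * caller can tell the two states apart from the softirq bits of
 * preempt_count alone, e.g.:
 *
 *	local_bh_disable();	// adds SOFTIRQ_DISABLE_OFFSET
 *	// in_softirq() is true here, but in_serving_softirq() is not:
 *	// bottom halves are merely disabled, nothing is being processed.
 *	local_bh_enable();
 *
 * Inside a handler invoked by __do_softirq(), SOFTIRQ_OFFSET is set
 * instead, so in_serving_softirq() is true as well.
 */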

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
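
/*
 * Usage sketch (illustrative, not part of the original file): the usual
 * pattern for protecting data shared with softirq context from process
 * context is to bracket the critical section with local_bh_disable() /
 * local_bh_enable(). The names my_stats/my_update_stats below are
 * hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stats);
 *
 *	static void my_update_stats(unsigned long delta)
 *	{
 *		local_bh_disable();		// keep softirqs off this CPU
 *		__this_cpu_add(my_stats, delta);
 *		local_bh_enable();		// may run pending softirqs now
 *	}
 *
 * local_bh_enable() ends up in __local_bh_enable_ip() above, which runs any
 * softirqs that were raised while bottom halves were disabled.
 */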

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack, we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC, as the current task context is borrowed for
         * the softirq. A softirq handler such as network RX might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

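        /*
         * Walk the pending bitmask from the lowest (highest-priority) bit
         * upwards: ffs() yields the 1-based index of the next set bit,
         * h is advanced to the matching softirq_action, the handler runs,
         * and the consumed bits are shifted out of 'pending' so every
         * pending vector is serviced exactly once per pass.
         */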
        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running())
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}

static inline void invoke_softirq(void)
{
        if (ksoftirqd_running())
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which may
                 * already be deep. Run the softirq on its own stack to prevent
                 * any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}
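
/*
 * Usage note (illustrative, not part of the original file): softirq vectors
 * are owned by core subsystems, which mark them pending from contexts that
 * already run with interrupts disabled, roughly the way the NAPI scheduling
 * path in the networking core does:
 *
 *	__raise_softirq_irqoff(NET_RX_SOFTIRQ);	// irqs are already off here
 *
 * Code running with interrupts enabled uses raise_softirq(nr) instead, which
 * wraps the raise in local_irq_save()/local_irq_restore() and may wake
 * ksoftirqd via wakeup_softirqd().
 */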

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
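
/*
 * Usage sketch (illustrative, not part of the original file): each softirq
 * vector is registered once at boot by its owning subsystem. This file
 * registers the tasklet vectors in softirq_init() below, and the networking
 * core registers its handlers along the lines of:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 */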

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};
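
/*
 * Note (added for clarity, not part of the original file): tasklet_head is a
 * per-CPU singly linked list with a tail pointer that points at the last
 * 'next' field (or at 'head' while the list is empty), so appending is O(1)
 * without walking the list:
 *
 *	*tail = t;		// link the new tasklet at the end
 *	tail  = &t->next;	// tail now points at the new last 'next'
 */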

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
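
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * driver initializes a tasklet once, schedules it from its interrupt
 * handler, and kills it on teardown. The names my_tasklet_fn, my_device
 * and dev below are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_device *dev = (struct my_device *)data;
 *		// deferred work; runs in softirq context, must not sleep
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// e.g. from the irq handler
 *	...
 *	tasklet_kill(&dev->tasklet);		// on shutdown, process context
 */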

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer: tasklet_hrtimer which is initialized
 * @function: hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
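
/*
 * Usage sketch (illustrative, not part of the original file): a user
 * initializes the combo once and then arms the underlying hrtimer, e.g.
 * via the tasklet_hrtimer_start() helper from <linux/interrupt.h>; the
 * supplied callback then runs in softirq context rather than hardirq
 * context. my_timer and my_cb below are hypothetical names.
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *timer)
 *	{
 *		// softirq context: do the periodic work here
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_timer, my_cb, CLOCK_MONOTONIC,
 *			     HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */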

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirqs on the current stack here,
                 * as we are not deep in the task stack.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets       NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}