/*
 * linux/kernel/time/tick-sched.c
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Per cpu nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

/*
 * The time when the last jiffy update happened. Protected by xtime_lock.
 */
static ktime_t last_jiffies_update;

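/**
 * tick_get_tick_sched - return the per-cpu tick_sched structure for @cpu
 */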
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/* Reevaluate with xtime_lock held */
	write_seqlock(&xtime_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		do_timer(++ticks);
	}
	write_sequnlock(&xtime_lock);
}

/*
 * Initialize and return the time of the last jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&xtime_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&xtime_lock);
	return period;
}

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ
/*
 * NO HZ enabled ?
 */
static int tick_nohz_enabled __read_mostly = 1;

/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	if (!strcmp(str, "off"))
		tick_nohz_enabled = 0;
	else if (!strcmp(str, "on"))
		tick_nohz_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("nohz=", setup_tick_nohz);

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu which has the update task assigned is in a long sleep.
 */
void tick_nohz_update_jiffies(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long flags;
	ktime_t now;

	if (!ts->tick_stopped)
		return;

	cpu_clear(cpu, nohz_cpu_mask);
	now = ktime_get();

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);
}

/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick.
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(void)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now, delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

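	/*
	 * Going tickless with softirqs pending would leave them
	 * unserviced for a potentially long time, so warn about it
	 * (rate-limited to ten messages).
	 */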
	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       local_softirq_pending());
			ratelimit++;
		}
	}

	now = ktime_get();
	/*
	 * When called from irq_exit we need to account the idle sleep time
	 * correctly.
	 */
	if (ts->tick_stopped) {
		delta = ktime_sub(now, ts->idle_entrytime);
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
	}

	ts->idle_entrytime = now;
	ts->idle_calls++;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
	} while (read_seqretry(&xtime_lock, seq));

	/* Get the next timer wheel timer */
	next_jiffies = get_next_timer_interrupt(last_jiffies);
	delta_jiffies = next_jiffies - last_jiffies;

	if (rcu_needs_cpu(cpu))
		delta_jiffies = 1;
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffy off */
	if ((long)delta_jiffies >= 1) {

		if (delta_jiffies > 1)
			cpu_set(cpu, nohz_cpu_mask);
		/*
		 * nohz_stop_sched_tick can be called several times before
		 * nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			if (select_nohz_load_balancer(1)) {
				/*
				 * sched tick not stopped!
				 */
				cpu_clear(cpu, nohz_cpu_mask);
				goto out;
			}

			ts->idle_tick = ts->sched_timer.expires;
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
		}

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here, the
		 * jiffies might go stale and do_timer() might never be
		 * invoked.
		 */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = -1;

		ts->idle_sleeps++;

		/*
		 * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
		 * there is no timer pending or at least extremely far
		 * into the future (12 days for HZ=1000). In this case
		 * we simply stop the tick timer:
		 */
		if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
			ts->idle_expires.tv64 = KTIME_MAX;
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer
		 */
		expires = ktime_add_ns(last_update, tick_period.tv64 *
				       delta_jiffies);
		ts->idle_expires = expires;

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
			goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffy boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpu_clear(cpu, nohz_cpu_mask);
	}
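	/*
	 * The next timer wheel timer is due within this jiffy, or we
	 * raced past the event we just programmed: let the timer
	 * softirq expire it.
	 */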
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
end:
	local_irq_restore(flags);
}

/**
 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 */
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	unsigned long ticks;
	ktime_t now, delta;

	if (!ts->tick_stopped)
		return;

	/* Update jiffies first */
	now = ktime_get();

	local_irq_disable();
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpu_clear(cpu, nohz_cpu_mask);

	/* Account the idle time */
	delta = ktime_sub(now, ts->idle_entrytime);
	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one-tick accounting.
	 * Enforce that this is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX) {
		add_preempt_count(HARDIRQ_OFFSET);
		account_system_time(current, HARDIRQ_OFFSET,
				    jiffies_to_cputime(ticks));
		sub_preempt_count(HARDIRQ_OFFSET);
	}

	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	hrtimer_cancel(&ts->sched_timer);
	ts->sched_timer.expires = ts->idle_tick;

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer,
				      ts->sched_timer.expires,
				      HRTIMER_MODE_ABS);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			if (!tick_program_event(ts->sched_timer.expires, 0))
				break;
		}
		/* Update jiffies and reread time */
		tick_do_update_jiffies64(now);
		now = ktime_get();
	}
	local_irq_enable();
}

static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
{
	hrtimer_forward(&ts->sched_timer, now, tick_period);
	return tick_program_event(ts->sched_timer.expires, 0);
}

/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	ktime_t now = ktime_get();

	dev->next_event.tv64 = KTIME_MAX;

	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start
	 * of idle" jiffy stamp so the idle accounting adjustment we
	 * do when we go busy again does not account too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		ts->idle_jiffies++;
	}

	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return;

	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}

/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	if (!tick_nohz_enabled)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	for (;;) {
		ts->sched_timer.expires = next;
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();

	printk(KERN_INFO "Switched to NOHZ mode on CPU #%d\n",
	       smp_processor_id());
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }

#endif /* NO_HZ */

/*
 * High resolution timer specific code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * We rearm the timer until we get disabled by the idle code
 * Called with interrupts disabled and timer->base->cpu_base->lock held.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct hrtimer_cpu_base *base = timer->base->cpu_base;
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the cpu in charge went
	 * into a long sleep. If two cpus happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * xtime_lock.
	 */
	if (unlikely(tick_do_timer_cpu == -1))
		tick_do_timer_cpu = cpu;
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * Do not call update_process_times() when we are not in irq
	 * context and have no valid regs pointer
	 */
	if (regs) {
		/*
		 * When we are idle and the tick is stopped, we have to touch
		 * the watchdog as we might not schedule for a really long
		 * time. This happens on complete idle SMP systems while
		 * waiting on the login prompt. We also increment the "start of
		 * idle" jiffy stamp so the idle accounting adjustment we do
		 * when we go busy again does not account too many ticks.
		 */
		if (ts->tick_stopped) {
			touch_softlockup_watchdog();
			ts->idle_jiffies++;
		}
		/*
		 * update_process_times() might take tasklist_lock, hence
		 * drop the base lock. sched-tick hrtimers are per-CPU and
		 * never accessible by userspace APIs, so this is safe to do.
		 */
		spin_unlock(&base->lock);
		update_process_times(user_mode(regs));
		profile_tick(CPU_PROFILING);
		spin_lock(&base->lock);
	}

	/* Do not restart, when we are in the idle loop */
	if (ts->tick_stopped)
		return HRTIMER_NORESTART;

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}

/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();
	u64 offset;

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;
	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	/* Get the next period (per cpu) */
	ts->sched_timer.expires = tick_init_jiffy_update();
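	/*
	 * Stagger the first expiry: offset each CPU by its share of
	 * half a tick period, so the per-cpu tick timers do not all
	 * fire at the same instant.
	 */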
	offset = ktime_to_ns(tick_period) >> 1;
	do_div(offset, NR_CPUS);
	offset *= smp_processor_id();
	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);

	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
			      HRTIMER_MODE_ABS);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}

#ifdef CONFIG_NO_HZ
	if (tick_nohz_enabled)
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
#endif
}

void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
	ts->tick_stopped = 0;
	ts->nohz_mode = NOHZ_MODE_INACTIVE;
}
#endif /* HIGH_RES_TIMERS */

/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}

/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	set_bit(0, &ts->check_clocks);
}

/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at
 * compile time or at runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_is_continuous() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}