/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires <= new_base->cpu_base->expires_next;
#else
	return 0;
#endif
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = &migration_base;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */
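
/*
 * Worked example of the scaling above (illustrative only): for
 * div = 2^34 the loop shifts the divisor right three times (sft = 3)
 * until it fits in 32 bits (2^31), then shifts the dividend by the
 * same amount, so
 *
 *	kt / 2^34  ==  (kt >> 3) / (2^34 >> 3)  ==  (kt >> 3) / 2^31
 *
 * Both operands lose the same number of low bits, the quotient is
 * preserved (up to rounding), and do_div() only ever sees a 32-bit
 * divisor, which is all a 32-bit host can divide by cheaply.
 */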

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
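
/*
 * Illustrative example (not part of this file): adding a relative
 * timeout to a value near the end of the representable range
 * saturates instead of wrapping negative, e.g.
 *
 *	ktime_t t = ktime_add_safe(ktime_set(KTIME_SEC_MAX - 1, 0),
 *				   ktime_set(1000, 0));
 *
 * leaves t == ktime_set(KTIME_SEC_MAX, 0) rather than a negative
 * wrapped value, which keeps later comparisons against expires_next
 * well defined.
 */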

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer,
				  enum hrtimer_mode mode)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer, mode);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __ffs(*active);
	*active &= ~(1U << idx);

	return &cpu_base->clock_base[idx];
}

#define for_each_active_base(base, cpu_base, active)	\
	while ((base = __next_base((cpu_base), &(active))))
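
/*
 * Worked example of the iteration above (illustrative only): with
 * active == 0b1010, i.e. HRTIMER_BASE_REALTIME (bit 1) and
 * HRTIMER_BASE_TAI (bit 3) queued, __next_base() first returns
 * clock_base[1] (__ffs -> 1, active becomes 0b1000), then
 * clock_base[3] (__ffs -> 3, active becomes 0), after which the
 * loop terminates. Empty bases are never visited.
 */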

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
					     struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	cpu_base->next_timer = timer;
#endif
}

static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases;
	ktime_t expires, expires_next = KTIME_MAX;

	hrtimer_update_next_timer(cpu_base, NULL);
	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;
			hrtimer_update_next_timer(cpu_base, timer);
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}
#endif

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets_now(&base->clock_was_set_seq,
					    offs_real, offs_boot, offs_tai);
}

/*
 * Is the high resolution mode active?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		cpu_base->hres_active : 0;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query whether high resolution mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Reprogram the event source, checking both queues for the
 * next event.
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	if (!cpu_base->hres_active)
		return;

	expires_next = __hrtimer_get_next_event(cpu_base);

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer,
			      struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next = KTIME_MAX;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!base->hres_active)
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution mode on CPU %d\n",
		       base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else

static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
void hrtimers_resume(void)
{
	lockdep_assert_irqs_disabled();
	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
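
#if 0	/*
	 * Illustrative sketch, not built: the usual periodic-timer pattern
	 * built on hrtimer_forward_now(), the wrapper around the function
	 * above that passes the current base time as @now. The example_*
	 * names are hypothetical.
	 */
static ktime_t example_period;

static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	/*
	 * Push the expiry forward past 'now'. The returned overrun count
	 * says how many whole periods elapsed, e.g. after the CPU was
	 * scheduled away in a VM; more than one means missed periods.
	 */
	u64 overruns = hrtimer_forward_now(timer, example_period);

	if (overruns > 1)
		pr_debug("missed %llu periods\n", overruns - 1);

	return HRTIMER_RESTART;	/* requeue at the forwarded expiry */
}
#endif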

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red-black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base,
			   enum hrtimer_mode mode)
{
	debug_activate(timer, mode);

	base->cpu_base->active_bases |= 1 << base->index;

	timer->state = HRTIMER_STATE_ENQUEUED;

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt).
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	timer->state = newstate;
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

#ifdef CONFIG_HIGH_RES_TIMERS
	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst that can happen is a
	 * superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets enqueued again.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
#endif
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	if (hrtimer_is_queued(timer)) {
		u8 state = timer->state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffy) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	leftmost = enqueue_hrtimer(timer, new_base, mode);
	if (!leftmost)
		goto unlock;

	if (!hrtimer_is_hres_active(timer)) {
		/*
		 * Kick to reschedule the next tick to handle the new timer
		 * on dynticks target.
		 */
		if (is_timers_nohz_active())
			wake_up_nohz_cpu(new_base->cpu_base->cpu);
	} else {
		hrtimer_reprogram(timer, new_base);
	}
unlock:
	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
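
#if 0	/*
	 * Illustrative sketch, not built: arming a one-shot timer with a
	 * slack window so the core may coalesce it with nearby expiries.
	 * 'my_timer' is assumed to have been set up via hrtimer_init()
	 * with a callback assigned; the example_* name is hypothetical.
	 */
static void example_arm_with_slack(struct hrtimer *my_timer)
{
	/* Expire 10ms from now, tolerating up to 100us of extra delay */
	hrtimer_start_range_ns(my_timer, ms_to_ktime(10),
			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
}
#endif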

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they need to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL); pinned is not considered here!
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
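
#if 0	/*
	 * Illustrative sketch, not built: the usual init/start/cancel
	 * lifecycle from a driver's point of view. The example_* names
	 * are hypothetical; hrtimer_start() is the zero-slack wrapper
	 * around hrtimer_start_range_ns().
	 */
static struct hrtimer example_timer;

static enum hrtimer_restart example_oneshot_cb(struct hrtimer *timer)
{
	pr_debug("example timer fired\n");
	return HRTIMER_NORESTART;	/* one-shot: do not requeue */
}

static void example_setup(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_oneshot_cb;
	hrtimer_start(&example_timer, ms_to_ktime(50), HRTIMER_MODE_REL);
}

static void example_teardown(void)
{
	/* Waits for a running callback to finish, see hrtimer_cancel() */
	hrtimer_cancel(&example_timer);
}
#endif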

/*
 * A timer is active when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being run
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section; if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If it's not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running.timer == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *node;
		ktime_t basenow;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow);
		}
	}
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent a timer being enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent the hrtimer interrupt routine from
	 * looping forever. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch (restart->nanosleep.type) {
#ifdef CONFIG_COMPAT
	case TT_COMPAT:
		if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
			return -EFAULT;
		break;
#endif
	case TT_NATIVE:
		if (put_timespec64(ts, restart->nanosleep.rmtp))
			return -EFAULT;
		break;
	default:
		BUG();
	}
	return -ERESTART_RESTARTBLOCK;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}

static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(const struct timespec64 *rqtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
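
/*
 * Illustrative user-space view of the syscall above (not part of this
 * file): a relative sleep that, when interrupted by a signal, reports
 * the unslept remainder via rmtp and can simply be resumed:
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 500000000 };
 *	struct timespec rem;
 *
 *	if (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		nanosleep(&rem, NULL);	// rem holds the time left to sleep
 */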
1567 | ||
1568 | #ifdef CONFIG_COMPAT | |
1569 | ||
1570 | COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |
1571 | struct compat_timespec __user *, rmtp) | |
1572 | { | |
1573 | struct timespec64 tu; | |
1574 | ||
1575 | if (compat_get_timespec64(&tu, rqtp)) | |
1576 | return -EFAULT; | |
1577 | ||
1578 | if (!timespec64_valid(&tu)) | |
1579 | return -EINVAL; | |
1580 | ||
1581 | current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; | |
1582 | current->restart_block.nanosleep.compat_rmtp = rmtp; | |
1583 | return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC); | |
1584 | } | |
1585 | #endif | |
1586 | ||
1587 | /* | |
1588 | * Functions related to boot-time initialization: | |
1589 | */ | |
1590 | int hrtimers_prepare_cpu(unsigned int cpu) | |
1591 | { | |
1592 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | |
1593 | int i; | |
1594 | ||
1595 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | |
1596 | cpu_base->clock_base[i].cpu_base = cpu_base; | |
1597 | timerqueue_init_head(&cpu_base->clock_base[i].active); | |
1598 | } | |
1599 | ||
1600 | cpu_base->cpu = cpu; | |
1601 | cpu_base->hres_active = 0; | |
1602 | hrtimer_init_hres(cpu_base); | |
1603 | return 0; | |
1604 | } | |

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED, not INACTIVE; otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU.
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest one on this CPU, but we
		 * run hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
}

int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we have expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */
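
/*
 * Illustrative sketch, not how this file actually wires things up (the
 * hrtimer callbacks are entered statically in the hotplug state table):
 * a comparable prepare/dead pair could be registered dynamically with
 * the CPU hotplug core like this, assuming a caller that checks ret:
 *
 *	ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "hrtimers:prepare",
 *				hrtimers_prepare_cpu, hrtimers_dead_cpu);
 */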

void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 * @clock_id:	timer clock to be used
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, clockid_t clock_id)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock_id, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
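
/*
 * Illustrative caller sketch (an assumption, not part of this file):
 * sleeping until an absolute CLOCK_REALTIME instant with 1ms of slack;
 * wall_deadline is a hypothetical ktime_t provided by the caller, and
 * a zero return means the deadline passed while -EINTR means a signal
 * or explicit wakeup arrived first.
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout_range_clock(&wall_deadline, NSEC_PER_MSEC,
 *					     HRTIMER_MODE_ABS, CLOCK_REALTIME);
 */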

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best-effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier than
 * @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
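
/*
 * Illustrative caller sketch (an assumption, not part of this file): a
 * driver sleeping for roughly 100us while giving the kernel 10us of
 * slack to coalesce the wakeup with others; both constants are example
 * values.
 *
 *	ktime_t to = ktime_set(0, 100 * NSEC_PER_USEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout_range(&to, 10 * NSEC_PER_USEC,
 *				       HRTIMER_MODE_REL);
 */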

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
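
/*
 * Illustrative caller sketch (an assumption, not part of this file): an
 * uninterruptible sleep until an absolute CLOCK_MONOTONIC deadline half
 * a millisecond out; the deadline value is an example only.
 *
 *	ktime_t deadline = ktime_add_ns(ktime_get(), 500 * NSEC_PER_USEC);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
 */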