// SPDX-License-Identifier: GPL-2.0-only
/*
 * sched_clock() for unstable CPU clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What this file implements:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i)
 *
 * How it is implemented:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep it within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
#include "sched.h"
#include <linux/sched_clock.h>

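As a reminder of how the interfaces described in the header comment are typically consumed, here is a hedged usage sketch. It is not part of this file; the helper and its workload are hypothetical, only local_clock() and pr_info() are real kernel APIs.

/*
 * Hypothetical usage sketch (not part of this file): time a section of
 * code on the current CPU. local_clock() is monotonic per CPU, so the
 * delta below is meaningful; comparing raw values taken on different
 * CPUs is not -- see the BIG FAT WARNING above.
 */
static void example_time_some_work(void)
{
	u64 t0 = local_clock();

	do_some_work();		/* hypothetical workload */

	pr_info("work took %llu ns\n", (unsigned long long)(local_clock() - t0));
}
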
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

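For example, with HZ=1000 this fallback advances in steps of NSEC_PER_SEC / HZ = 1,000,000,000 / 1,000 = 1,000,000 ns, i.e. it only has jiffy (1 ms) resolution; with HZ=250 the step is 4,000,000 ns. Subtracting INITIAL_JIFFIES makes the returned value start near 0 at boot even though jiffies itself is initialized close to its wrap point.
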
static DEFINE_STATIC_KEY_FALSE(sched_clock_running);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;

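A brief worked rearrangement, to make the invariant above explicit (this note is editorial, not part of the file): both offsets are set by sampling one sched_clock_data and solving the invariant for the unknown offset at that instant,

	ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
	=> __sched_clock_offset = (tick_gtod + __gtod_offset) - tick_raw	/* __set_sched_clock_stable() */
	=> __gtod_offset = (tick_raw + __sched_clock_offset) - tick_gtod	/* __sched_clock_gtod_offset() */

where tick_raw and tick_gtod are the sched_clock() and ktime_get_ns() values sampled together by __scd_stamp().
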
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}

static void __scd_stamp(struct sched_clock_data *scd)
{
	scd->tick_gtod = ktime_get_ns();
	scd->tick_raw = sched_clock();
}

static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd;

	/*
	 * Since we're still unstable and the tick is already running, we have
	 * to disable IRQs in order to get a consistent scd->tick* reading.
	 */
	local_irq_disable();
	scd = this_scd();
	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	__sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
	local_irq_enable();

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

/*
 * If we ever get here, we're screwed, because we found out -- typically after
 * the fact -- that TSC wasn't good. This means all our clocksources (including
 * ktime) could have reported wrong values.
 *
 * What we do here is an attempt to fix up and continue sort of where we left
 * off in a coherent manner.
 *
 * The only way to fully avoid random clock jumps is to boot with:
 * "tsc=unstable".
 */
static void __sched_clock_work(struct work_struct *work)
{
	struct sched_clock_data *scd;
	int cpu;

	/* take a current timestamp and set 'now' */
	preempt_disable();
	scd = this_scd();
	__scd_stamp(scd);
	scd->clock = scd->tick_gtod + __gtod_offset;
	preempt_enable();

	/* clone to all CPUs */
	for_each_possible_cpu(cpu)
		per_cpu(sched_clock_data, cpu) = *scd;

	printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, __gtod_offset,
			scd->tick_raw,  __sched_clock_offset);

	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		return;

	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
	schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (static_key_count(&sched_clock_running.key) == 2)
		__clear_sched_clock_stable();
}

static void __sched_clock_gtod_offset(void)
{
	struct sched_clock_data *scd = this_scd();

	__scd_stamp(scd);
	__gtod_offset = (scd->tick_raw + __sched_clock_offset) - scd->tick_gtod;
}

void __init sched_clock_init(void)
{
	/*
	 * Set __gtod_offset such that once we mark sched_clock_running,
	 * sched_clock_tick() continues where sched_clock() left off.
	 *
	 * Even if TSC is buggered, we're still UP at this point so it
	 * can't really be out of sync.
	 */
	local_irq_disable();
	__sched_clock_gtod_offset();
	local_irq_enable();

	static_branch_inc(&sched_clock_running);
}
/*
 * We run this as late_initcall() such that it runs after all built-in drivers,
 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
 */
static int __init sched_clock_init_late(void)
{
	static_branch_inc(&sched_clock_running);
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();

	return 0;
}
late_initcall(sched_clock_init_late);

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

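A quick worked case, to show why the signed cast handles u64 wraparound (the numbers are illustrative only): if y = 0xFFFFFFFFFFFFFFFB (just before wrap) and x = 10 (just after wrap), then x - y is 15 modulo 2^64, which is a small positive value when viewed as s64, so wrap_max(x, y) returns x even though x < y as unsigned values. A plain max() would have returned y and the clock would appear stuck near U64_MAX.
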
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock, gtod;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	gtod = scd->tick_gtod + __gtod_offset;
	clock = gtod + delta;
	min_clock = wrap_max(gtod, old_clock);
	max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

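To make the window arithmetic above concrete, here is a small, self-contained user-space sketch. It is not kernel code; the numbers, and the assumption HZ=1000 (so TICK_NSEC is 1,000,000 ns), are purely illustrative. It replays the clamp with a raw sched_clock() delta that has run well ahead of the tick window.

#include <stdint.h>
#include <stdio.h>

static uint64_t wrap_min(uint64_t x, uint64_t y) { return (int64_t)(x - y) < 0 ? x : y; }
static uint64_t wrap_max(uint64_t x, uint64_t y) { return (int64_t)(x - y) > 0 ? x : y; }

int main(void)
{
	const uint64_t TICK_NSEC = 1000000ULL;	/* assumes HZ=1000: one tick = 1 ms */
	uint64_t gtod      = 1000000000ULL;	/* scd->tick_gtod + __gtod_offset at the last tick */
	uint64_t old_clock = 1000000500ULL;	/* last value this CPU handed out */
	uint64_t delta     = 5000000ULL;	/* raw sched_clock() advanced 5 ms since the tick */

	uint64_t clock     = gtod + delta;				/* 1,005,000,000: too far ahead */
	uint64_t min_clock = wrap_max(gtod, old_clock);			/* 1,000,000,500: never go backwards */
	uint64_t max_clock = wrap_max(old_clock, gtod + TICK_NSEC);	/* 1,001,000,000: at most one tick ahead */

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	/* prints 1001000000: the crazy 5 ms delta was clamped to the tick window */
	printf("%llu\n", (unsigned long long)clock);
	return 0;
}
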
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32-bit kernels as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32-bit, otherwise the
	 * update on the remote CPU can hit in between the readout of
	 * the low 32-bit and the high 32-bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32-bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both locks
	 * taken to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

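For intuition (illustrative numbers only): if the remote CPU's scd->clock was last advanced to 1,000,000,100 ns while our local clock already reads 1,000,000,500 ns, remote_clock - this_clock is negative, so the remote scd->clock is cmpxchg'd forward to the local value and the caller sees 1,000,000,500 ns. In the rarer opposite case the local scd->clock is pulled up to the remote value instead; either way both CPUs converge on the larger of the two timestamps, which keeps cross-CPU readings moving monotonically.
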
/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + __sched_clock_offset;

	if (!static_branch_likely(&sched_clock_running))
		return sched_clock();

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	if (sched_clock_stable())
		return;

	if (!static_branch_likely(&sched_clock_running))
		return;

	lockdep_assert_irqs_disabled();

	scd = this_scd();
	__scd_stamp(scd);
	sched_clock_local(scd);
}

void sched_clock_tick_stable(void)
{
	if (!sched_clock_stable())
		return;

	/*
	 * Called under watchdog_lock.
	 *
	 * The watchdog just found this TSC to (still) be stable, so now is a
	 * good moment to update our __gtod_offset. Because once we find the
	 * TSC to be unstable, any computation will be computing crap.
	 */
	local_irq_disable();
	__sched_clock_gtod_offset();
	local_irq_enable();
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled; resync with ktime.
 */
void sched_clock_idle_wakeup_event(void)
{
	unsigned long flags;

	if (sched_clock_stable())
		return;

	if (unlikely(timekeeping_suspended))
		return;

	local_irq_save(flags);
	sched_clock_tick();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void __init sched_clock_init(void)
{
	static_branch_inc(&sched_clock_running);
	local_irq_disable();
	generic_sched_clock_init();
	local_irq_enable();
}

u64 sched_clock_cpu(int cpu)
{
	if (!static_branch_likely(&sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}