/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 *
 * sched_clock_cpu(i)
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the implementation simply uses
 * sched_clock(), which is then assumed to provide these properties
 * (mostly it means the architecture provides a globally synchronized
 * highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as the base and use sched_clock() deltas to improve resolution.
 * The deltas are filtered to provide monotonicity and to keep the result
 * within an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
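
/*
 * Illustrative usage sketch (assumptions: cpu_clock(i) and local_clock()
 * are thin wrappers that end up in sched_clock_cpu(), as provided by the
 * scheduler headers; do_some_work() is a hypothetical workload).  A
 * typical single-CPU measurement looks like:
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_some_work();
 *	t1 = local_clock();
 *	t1 - t0;	nanosecond delta, monotonic on this CPU
 *
 * Comparing values obtained on *different* CPUs is subject to the BIG FAT
 * WARNING above: such a difference can be negative.
 */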
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
        return (unsigned long long)(jiffies - INITIAL_JIFFIES)
                                        * (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
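
/*
 * Worked example for the fallback above (assuming HZ=250, a common
 * config value): NSEC_PER_SEC / HZ = 1,000,000,000 / 250 = 4,000,000,
 * so this jiffies-based clock only advances in 4 ms steps.  Subtracting
 * INITIAL_JIFFIES makes the value start near 0 at boot, since jiffies
 * itself is initialized close to its wrap point to flush out wrap bugs.
 */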

__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
        sched_clock_running = 1;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
 */
static __read_mostly u64 raw_offset;
static __read_mostly u64 gtod_offset;

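/*
 * How the two offsets maintain the invariant above (restating the
 * transition code below): on the unstable->stable switch,
 * __set_sched_clock_stable() picks
 *
 *	raw_offset  = (tick_gtod + gtod_offset) - tick_raw
 *
 * so that sched_clock() + raw_offset continues exactly where the
 * GTOD-based value left off; on the stable->unstable switch,
 * __clear_sched_clock_stable() picks
 *
 *	gtod_offset = (tick_raw + raw_offset) - tick_gtod
 *
 * for the same reason in the other direction (best effort only, see the
 * comment in that function).
 */
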
struct sched_clock_data {
        u64                     tick_raw;
        u64                     tick_gtod;
        u64                     clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
        return static_branch_likely(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
        struct sched_clock_data *scd = this_scd();

        /*
         * Attempt to make the (initial) unstable->stable transition continuous.
         */
        raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);

        printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
                        scd->tick_gtod, gtod_offset,
                        scd->tick_raw,  raw_offset);

        static_branch_enable(&__sched_clock_stable);
        tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
        struct sched_clock_data *scd = this_scd();

        /*
         * Attempt to make the stable->unstable transition continuous.
         *
         * Trouble is, this is typically called from the TSC watchdog
         * timer, which is late by definition. This means the tick
         * values can already be screwy.
         *
         * Still do what we can.
         */
        gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

        printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
                        scd->tick_gtod, gtod_offset,
                        scd->tick_raw,  raw_offset);

        static_branch_disable(&__sched_clock_stable);
        tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
        __sched_clock_stable_early = 0;

        smp_mb(); /* matches sched_clock_init_late() */

        if (sched_clock_running == 2)
                schedule_work(&sched_clock_work);
}

void sched_clock_init_late(void)
{
        sched_clock_running = 2;
        /*
         * Ensure that it is impossible to not do a static_key update.
         *
         * Either {set,clear}_sched_clock_stable() must see sched_clock_running
         * and do the update, or we must see their __sched_clock_stable_early
         * and do the update, or both.
         */
        smp_mb(); /* matches {set,clear}_sched_clock_stable() */

        if (__sched_clock_stable_early)
                __set_sched_clock_stable();
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;
}

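/*
 * Worked example (illustrative values): the casts above ask "which value
 * is later modulo 2^64", not which is numerically larger.  For x = 5 and
 * y = U64_MAX - 2:
 *
 *	x - y == 8 (mod 2^64), so (s64)(x - y) > 0
 *
 * and wrap_max(x, y) returns 5: the numerically small value is treated as
 * the later one because the u64 clock has wrapped, whereas a plain max()
 * would return y.  wrap_min(x, y) correspondingly returns y.
 */
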
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
        u64 now, clock, old_clock, min_clock, max_clock;
        s64 delta;

again:
        now = sched_clock();
        delta = now - scd->tick_raw;
        if (unlikely(delta < 0))
                delta = 0;

        old_clock = scd->clock;

        /*
         * scd->clock = clamp(scd->tick_gtod + gtod_offset + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */

        clock = scd->tick_gtod + gtod_offset + delta;
        min_clock = wrap_max(scd->tick_gtod, old_clock);
        max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
                goto again;

        return clock;
}

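/*
 * Worked example for the window above (illustrative values, assuming
 * HZ=1000 so TICK_NSEC = 1,000,000, and gtod_offset = 0): suppose at the
 * last tick tick_gtod = 1,000,000,000 and old_clock = 1,000,200,000.
 *
 *  - If sched_clock() races ahead so that delta = 50,000,000, then
 *    clock = 1,050,000,000 gets clamped down to
 *    max_clock = max(old_clock, tick_gtod + TICK_NSEC) = 1,001,000,000.
 *
 *  - If sched_clock() went backwards (delta forced to 0), then
 *    clock = 1,000,000,000 gets clamped up to
 *    min_clock = max(tick_gtod, old_clock) = 1,000,200,000.
 *
 * So the per-cpu value never moves backwards and never runs more than
 * roughly one tick ahead of the GTOD base.
 */
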
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
        struct sched_clock_data *my_scd = this_scd();
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
        /*
         * Careful here: The local and the remote clock values need to
         * be read out atomically as we need to compare the values and
         * then update either the local or the remote side. So the
         * cmpxchg64 below only protects one readout.
         *
         * We must reread via sched_clock_local() in the retry case on
         * 32bit as an NMI could use sched_clock_local() via the
         * tracer and hit between the readout of
         * the low 32bit and the high 32bit portion.
         */
        this_clock = sched_clock_local(my_scd);
        /*
         * We must enforce atomic readout on 32bit, otherwise the
         * update on the remote cpu can hit in between the readout of
         * the low 32bit and the high 32bit portion.
         */
        remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
        /*
         * On 64bit the read of [my]scd->clock is atomic versus the
         * update, so we can avoid the above 32bit dance.
         */
        sched_clock_local(my_scd);
again:
        this_clock = my_scd->clock;
        remote_clock = scd->clock;
#endif

        /*
         * Use the opportunity that we have both locks
         * taken to couple the two clocks: we take the
         * larger time as the latest time for both
         * runqueues. (this creates monotonic movement)
         */
        if (likely((s64)(remote_clock - this_clock) < 0)) {
                ptr = &scd->clock;
                old_val = remote_clock;
                val = this_clock;
        } else {
                /*
                 * Should be rare, but possible:
                 */
                ptr = &my_scd->clock;
                old_val = this_clock;
                val = remote_clock;
        }

        if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;

        return val;
}

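/*
 * Worked example for the coupling above (illustrative values): if this
 * CPU's clock reads 1000 and the remote scd->clock reads 900, the likely
 * branch bumps the remote value to 1000 via cmpxchg64(), so a later
 * sched_clock_cpu() on that CPU cannot report less than what was just
 * handed out here.  In the rare opposite case the local clock is bumped
 * instead; either way both end up at the larger of the two values.
 */
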
/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd;
        u64 clock;

        if (sched_clock_stable())
                return sched_clock() + raw_offset;

        if (unlikely(!sched_clock_running))
                return 0ull;

        preempt_disable_notrace();
        scd = cpu_sdc(cpu);

        if (cpu != smp_processor_id())
                clock = sched_clock_remote(scd);
        else
                clock = sched_clock_local(scd);
        preempt_enable_notrace();

        return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
        struct sched_clock_data *scd;

        WARN_ON_ONCE(!irqs_disabled());

        /*
         * Update these values even if sched_clock_stable(), because it can
         * become unstable at any point in time at which point we need some
         * values to fall back on.
         *
         * XXX arguably we can skip this if we expose tsc_clocksource_reliable
         */
        scd = this_scd();
        scd->tick_raw = sched_clock();
        scd->tick_gtod = ktime_get_ns();

        if (!sched_clock_stable() && likely(sched_clock_running))
                sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
        if (timekeeping_suspended)
                return;

        sched_clock_tick();
        touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
        if (unlikely(!sched_clock_running))
                return 0;

        return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
        return local_clock();
}