/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 * Ingo Molnar <mingo@redhat.com>
 * Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)   -- can be used from any context, including NMI.
 * local_clock()  -- is cpu_clock() on the current cpu.
 *
 * sched_clock_cpu(i)
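 *
 * A minimal usage sketch (illustrative only; do_work() is a placeholder,
 * not a real kernel function):
 *
 *	u64 t0, t1;
 *
 *	t0 = local_clock();
 *	do_work();
 *	t1 = local_clock();
 *
 * t1 - t0 is a meaningful interval as long as both stamps were taken on
 * the same CPU (e.g. with preemption disabled); stamps from different
 * CPUs are subject to the warning above.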
 *
 * How:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation simply uses
 * sched_clock(), which is then assumed to provide these properties itself
 * (mostly it means the architecture provides a globally synchronized
 * highres time source).
 *
 * Otherwise it tries to create a semi-stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as the base and use sched_clock() deltas to improve
 * resolution. The deltas are filtered to provide monotonicity and to keep
 * the clock within an expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>

/*
 * Scheduler clock - returns current time in nanosecond units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
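 *
 * A sketch of the resulting resolution (assuming HZ == 1000):
 * NSEC_PER_SEC / HZ == 1000000, so this fallback only advances in
 * 1 ms steps, i.e. at jiffy granularity.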
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);

__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
static int __sched_clock_stable_early;

int sched_clock_stable(void)
{
	return static_key_false(&__sched_clock_stable);
}

static void __set_sched_clock_stable(void)
{
	if (!sched_clock_stable())
		static_key_slow_inc(&__sched_clock_stable);
}

void set_sched_clock_stable(void)
{
	__sched_clock_stable_early = 1;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	__set_sched_clock_stable();
}

static void __clear_sched_clock_stable(struct work_struct *work)
{
	/* XXX worry about clock continuity */
	if (sched_clock_stable())
		static_key_slow_dec(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);

void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init() */

	if (!sched_clock_running)
		return;

	schedule_work(&sched_clock_work);
}

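/*
 * Per-CPU state for the unstable path: the raw sched_clock() and GTOD
 * values sampled at the last tick, plus the last filtered clock value
 * handed out for this CPU.
 */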
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;

	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
	else
		__clear_sched_clock_stable(NULL);
}

/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
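
/*
 * Illustrative example (assumed values, not from this file): if x == 8
 * has wrapped past zero while y == ULLONG_MAX - 7, then x - y == 16 and
 * (s64)(x - y) > 0, so wrap_max() correctly picks x as the later time
 * even though x < y as a plain u64 comparison.
 */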

/*
 * update the percpu scd from the raw @now value
 *
 * - filter out backward motion
 * - use the GTOD tick value to create a window to filter crazy TSC values
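 *
 * A worked example with assumed values (HZ == 1000, so TICK_NSEC ==
 * 1000000): say tick_gtod == 1000000, clock == 1200000 and the raw
 * sched_clock() delta since tick_raw is 2500000. The unclamped value
 * 3500000 exceeds tick_gtod + TICK_NSEC == 2000000 and is capped there;
 * a delta of 0 would yield 1000000, which is raised back to the old
 * clock value 1200000 to preserve monotonicity.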
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit in between the readout of the low 32bit
	 * and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values at
	 * hand to couple the two clocks: we take the larger time
	 * as the latest time for both runqueues. (this creates
	 * monotonic movement)
	 */
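	/*
	 * For example (illustrative values): this_clock == 100 and
	 * remote_clock == 90 gives a negative signed difference, so
	 * the remote clock is pulled forward to 100 by the cmpxchg64
	 * below.
	 */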
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

/*
 * Similar to cpu_clock(). Preemption is disabled internally, so the
 * caller does not need to disable local IRQs.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable())
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(cpu);

	return sched_clock();
}

/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same CPU.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	if (!sched_clock_stable())
		return sched_clock_cpu(raw_smp_processor_id());

	return sched_clock();
}

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock();
}

u64 local_clock(void)
{
	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock() minus the time the guest
 * was suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock().
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}
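
/*
 * A hypothetical override sketch (illustrative only; steal_time_ns() is
 * a made-up placeholder, not a real kernel API): a paravirt guest could
 * implement running_clock() as local_clock() minus accumulated steal
 * time:
 *
 *	u64 running_clock(void)
 *	{
 *		return local_clock() - steal_time_ns();
 *	}
 */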