/*
 * include/linux/sched/clock.h
 */
1 | #ifndef _LINUX_SCHED_CLOCK_H |
2 | #define _LINUX_SCHED_CLOCK_H | |
3 | ||
ea947639 | 4 | #include <linux/smp.h> |
e6017571 | 5 | |
56898103 IM |
6 | /* |
7 | * Do not use outside of architecture code which knows its limitations. | |
8 | * | |
9 | * sched_clock() has no promise of monotonicity or bounded drift between | |
10 | * CPUs, use (which you should not) requires disabling IRQs. | |
11 | * | |
12 | * Please use one of the three interfaces below. | |
13 | */ | |
14 | extern unsigned long long notrace sched_clock(void); | |
15 | ||
16 | /* | |
17 | * See the comment in kernel/sched/clock.c | |
18 | */ | |
19 | extern u64 running_clock(void); | |
20 | extern u64 sched_clock_cpu(int cpu); | |
21 | ||
22 | ||
23 | extern void sched_clock_init(void); | |
24 | ||
25 | #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | |
26 | static inline void sched_clock_init_late(void) | |
27 | { | |
28 | } | |
29 | ||
30 | static inline void sched_clock_tick(void) | |
31 | { | |
32 | } | |
33 | ||
34 | static inline void clear_sched_clock_stable(void) | |
35 | { | |
36 | } | |
37 | ||
38 | static inline void sched_clock_idle_sleep_event(void) | |
39 | { | |
40 | } | |
41 | ||
42 | static inline void sched_clock_idle_wakeup_event(u64 delta_ns) | |
43 | { | |
44 | } | |
45 | ||
46 | static inline u64 cpu_clock(int cpu) | |
47 | { | |
48 | return sched_clock(); | |
49 | } | |
50 | ||
51 | static inline u64 local_clock(void) | |
52 | { | |
53 | return sched_clock(); | |
54 | } | |
55 | #else | |
56 | extern void sched_clock_init_late(void); | |
56898103 IM |
57 | extern int sched_clock_stable(void); |
58 | extern void clear_sched_clock_stable(void); | |
59 | ||
698eff63 PZ |
60 | /* |
61 | * When sched_clock_stable(), __sched_clock_offset provides the offset | |
62 | * between local_clock() and sched_clock(). | |
63 | */ | |
64 | extern u64 __sched_clock_offset; | |
65 | ||
66 | ||
56898103 IM |
67 | extern void sched_clock_tick(void); |
68 | extern void sched_clock_idle_sleep_event(void); | |
69 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | |
70 | ||
71 | /* | |
72 | * As outlined in clock.c, provides a fast, high resolution, nanosecond | |
73 | * time source that is monotonic per cpu argument and has bounded drift | |
74 | * between cpus. | |
75 | * | |
76 | * ######################### BIG FAT WARNING ########################## | |
77 | * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can # | |
78 | * # go backwards !! # | |
79 | * #################################################################### | |
80 | */ | |
81 | static inline u64 cpu_clock(int cpu) | |
82 | { | |
83 | return sched_clock_cpu(cpu); | |
84 | } | |
85 | ||
86 | static inline u64 local_clock(void) | |
87 | { | |
88 | return sched_clock_cpu(raw_smp_processor_id()); | |
89 | } | |
90 | #endif | |
91 | ||
92 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | |
93 | /* | |
94 | * An i/f to runtime opt-in for irq time accounting based off of sched_clock. | |
95 | * The reason for this explicit opt-in is not to have perf penalty with | |
96 | * slow sched_clocks. | |
97 | */ | |
98 | extern void enable_sched_clock_irqtime(void); | |
99 | extern void disable_sched_clock_irqtime(void); | |
100 | #else | |
101 | static inline void enable_sched_clock_irqtime(void) {} | |
102 | static inline void disable_sched_clock_irqtime(void) {} | |
103 | #endif | |
104 | ||
e6017571 | 105 | #endif /* _LINUX_SCHED_CLOCK_H */ |