/*
 * sched_clock.c: Generic sched_clock() support, to extend low level
 * hardware time counters to full 64-bit ns values.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:		sched_clock() value at last update.
 * @epoch_cyc:		Clock cycle value at last update.
 * @sched_clock_mask:	Bitmask for two's complement subtraction of
 *			non-64-bit clocks.
 * @read_sched_clock:	Current clock source (or dummy source when suspended).
 * @mult:		Multiplier for scaled math conversion.
 * @shift:		Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 sched_clock_mask;
	u64 (*read_sched_clock)(void);
	u32 mult;
	u32 shift;
};

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_t seq;
	struct clock_read_data read_data[2];
	ktime_t wrap_kt;
	unsigned long rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG.
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

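/*
 * Worked example of the scaled math above (illustrative numbers, not
 * taken from any particular platform): for a 1 GHz counter each cycle
 * is exactly 1ns, so clocks_calc_mult_shift() can pick
 * mult = 0x80000000 and shift = 31, giving
 * cyc_to_ns(cyc, mult, shift) = (cyc * 2^31) >> 31 = cyc. Rates that
 * don't divide NSEC_PER_SEC evenly get the same treatment, with the
 * cycle-to-ns ratio approximated to u32 precision.
 */
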
unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned long seq;
	struct clock_read_data *rd;

	do {
		seq = raw_read_seqcount(&cd.seq);
		rd = cd.read_data + (seq & 1);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (read_seqcount_retry(&cd.seq, seq));

	return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
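
/*
 * Illustrative interleaving for the latch above (a sketch of what the
 * existing code does, not extra machinery): with 'seq' initially even,
 * readers use read_data[0]. The first raw_write_seqcount_latch() makes
 * 'seq' odd, steering any new reader to the already-updated
 * read_data[1] while read_data[0] is rewritten; the second makes 'seq'
 * even again. A reader that raced with either store observes 'seq'
 * change and retries via read_seqcount_retry(), so it never consumes a
 * half-written copy.
 */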

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

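/*
 * Periodic refresh of the epoch: this timer fires once per 'wrap_kt',
 * i.e. before the underlying counter can wrap, so the
 * (cyc - epoch_cyc) delta computed in sched_clock() always stays in
 * range.
 */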
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock = read;
	rd.sched_clock_mask = new_mask;
	rd.mult = new_mult;
	rd.shift = new_shift;
	rd.epoch_cyc = new_epoch;
	rd.epoch_ns = ns;

	update_clock_read_data(&rd);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else {
		if (r >= 1000) {
			r /= 1000;
			r_unit = 'k';
		} else {
			r_unit = ' ';
		}
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
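
/*
 * Registration example (a hedged sketch; the counter width, rate and
 * all 'my_*' names below are hypothetical, not from this file): a
 * platform with a free-running 56-bit counter ticking at 19.2 MHz
 * would call, from early init code with interrupts still disabled:
 *
 *	static u64 notrace my_counter_read(void)
 *	{
 *		return my_read_counter_hw();	// hypothetical MMIO read
 *	}
 *
 *	sched_clock_register(my_counter_read, 56, 19200000);
 *
 * Later registrations may replace this clock; the 'cd.rate > rate'
 * check above rejects any clock slower than the one already in use.
 */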

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make it the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned long seq = raw_read_seqcount(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
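
/*
 * With the suspended read function installed, sched_clock() computes
 * cyc = (epoch_cyc - epoch_cyc) & mask = 0 and therefore returns
 * exactly the 'epoch_ns' captured by the final update_sched_clock()
 * before suspend.
 */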

static int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

static void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);