/*
 *
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>

#include <asm/segment.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvCallXm.h>
#endif
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/systemcfg.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
extern int piranha_simulator;
#ifdef CONFIG_PPC_ISERIES
unsigned long iSeries_recal_titan = 0;
unsigned long iSeries_recal_tb = 0;
static unsigned long first_settimeofday = 1;
#endif

#define XSEC_PER_SEC (1024*1024)
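/*
 * Note: an "xsec" is 1/XSEC_PER_SEC = 2^-20 of a second (about 0.954 us),
 * so 1 second = 1048576 xsec.  Working in this binary fraction lets the
 * conversion code below use shifts and multiplies instead of divides.
 */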

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
unsigned long tb_to_xs;
unsigned tb_to_us;
unsigned long processor_freq;
DEFINE_SPINLOCK(rtc_lock);

unsigned long tb_to_ns_scale;
unsigned long tb_to_ns_shift;

struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;
extern unsigned long lpevent_count;
extern int smp_tb_synchronized;

extern struct timezone sys_tz;

void ppc_adjtimex(void);

static unsigned adjusting_time = 0;

static __inline__ void timer_check_rtc(void)
{
	/*
	 * update the rtc when needed, this should be performed on the
	 * right fraction of a second. Half or full second ?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls. Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts. This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non UTC clocks.
	 */
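	/*
	 * The 659 below keeps the RTC update interval just under the
	 * usual 11-minute (660 s) period used while the clock is
	 * reported as NTP-synchronized (STA_UNSYNC clear).
	 */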
	if ( (time_status & STA_UNSYNC) == 0 &&
	     xtime.tv_sec - last_rtc_update >= 659 &&
	     abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
	     jiffies - wall_jiffies == 1) {
		struct rtc_time tm;
		to_tm(xtime.tv_sec+1, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec+1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
{
	unsigned long sec, usec, tb_ticks;
	unsigned long xsec, tb_xsec;
	struct gettimeofday_vars * temp_varp;
	unsigned long temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (gets rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply)
	 */
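	/*
	 * tb_to_xs holds xsec-per-timebase-tick as a 0.64 fixed-point
	 * fraction (computed elsewhere via div128_by_32(XSEC_PER_SEC, 0,
	 * tb_ticks_per_sec, ...)), so mulhdu(tb_ticks, tb_to_xs) yields
	 * elapsed xsec directly.  The final usec = xsec * USEC_PER_SEC /
	 * XSEC_PER_SEC step involves only compile-time constants, with
	 * XSEC_PER_SEC a power of two.
	 */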
	temp_varp = do_gtod.varp;
	tb_ticks = tb_val - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );
	xsec = temp_stamp_xsec + tb_xsec;
	sec = xsec / XSEC_PER_SEC;
	xsec -= sec * XSEC_PER_SEC;
	usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
	__do_gettimeofday(tv, get_tb());
}

EXPORT_SYMBOL(do_gettimeofday);

/* Synchronize xtime with do_gettimeofday */

static inline void timer_sync_xtime(unsigned long cur_tb)
{
	struct timeval my_tv;

	__do_gettimeofday(&my_tv, cur_tb);

	if (xtime.tv_sec <= my_tv.tv_sec) {
		xtime.tv_sec = my_tv.tv_sec;
		xtime.tv_nsec = my_tv.tv_usec * 1000;
	}
}

/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32-bit number. This is a requirement of our fast 32-bit userland
 * implementation in the vdso. If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fall back to calling
 * the syscall.
 */
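/*
 * The check below looks at bit 31 of (cur_tb - tb_orig_stamp): once the
 * difference reaches 2^31 ticks, the elapsed time is folded into
 * stamp_xsec and tb_orig_stamp restarts at the current timebase value.
 */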
static __inline__ void timer_recalc_offset(unsigned long cur_tb)
{
	struct gettimeofday_vars * temp_varp;
	unsigned temp_idx;
	unsigned long offset, new_stamp_xsec, new_tb_orig_stamp;

	if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
		return;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	new_tb_orig_stamp = cur_tb;
	offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp;
	new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);

	temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_orig_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	++(systemcfg->tb_update_count);
	smp_wmb();
	systemcfg->tb_orig_stamp = new_tb_orig_stamp;
	systemcfg->stamp_xsec = new_stamp_xsec;
	smp_wmb();
	++(systemcfg->tb_update_count);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip. The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;
	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';
		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
					new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec = new_tb_ticks_per_sec;
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
				systemcfg->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"    new tb_ticks_per_jiffy = %lu\n"
					"    old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
#endif

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer. We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

unsigned long tb_last_stamp __cacheline_aligned_in_smp;

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
int timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	unsigned long cur_tb;
	struct paca_struct *lpaca = get_paca();
	unsigned long cpu = smp_processor_id();

	irq_enter();

	profile_tick(CPU_PROFILING, regs);

	lpaca->lppaca.int_dword.fields.decr_int = 0;

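	/*
	 * next_jiffy_update_tb marks the timebase value at which the next
	 * jiffy is due; if the interrupt was delayed, the loop below
	 * catches up by accounting one jiffy per iteration.
	 */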
	while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			update_process_times(user_mode(regs));
		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu == boot_cpuid) {
			write_seqlock(&xtime_lock);
			tb_last_stamp = lpaca->next_jiffy_update_tb;
			timer_recalc_offset(lpaca->next_jiffy_update_tb);
			do_timer(regs);
			timer_sync_xtime(lpaca->next_jiffy_update_tb);
			timer_check_rtc();
			write_sequnlock(&xtime_lock);
			if ( adjusting_time && (time_adjust == 0) )
				ppc_adjtimex();
		}
		lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
	}

	next_dec = lpaca->next_jiffy_update_tb - cur_tb;
	if (next_dec > lpaca->default_decr)
		next_dec = lpaca->default_decr;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	{
		struct ItLpQueue *lpq = lpaca->lpqueue_ptr;
		if (lpq && ItLpQueue_isLpIntPending(lpq))
			lpevent_count += ItLpQueue_process(lpq, regs);
	}
#endif

	/* collect purr register values often, for accurate calculations */
#if defined(CONFIG_PPC_PSERIES)
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();

	return 1;
}

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
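/*
 * tb_to_ns_scale and tb_to_ns_shift are set up in time_init() so that
 * mulhdu(tb, tb_to_ns_scale) << tb_to_ns_shift is approximately
 * tb * 10^9 / tb_ticks_per_sec, i.e. nanoseconds of uptime (assuming the
 * timebase started near zero at boot).
 */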
unsigned long long sched_clock(void)
{
	return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
}

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	unsigned long delta_xsec;
	long int tb_delta;
	unsigned long new_xsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Updating the RTC is not the job of this code. If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared. Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */
#ifdef CONFIG_PPC_ISERIES
	if ( first_settimeofday ) {
		iSeries_tb_recal();
		first_settimeofday = 0;
	}
#endif
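	/*
	 * xtime only advances at timer ticks, so back the requested time
	 * off by the interval that has elapsed since the last recorded
	 * tick (including jiffies not yet folded into wall time) before
	 * storing it.
	 */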
	tb_delta = tb_ticks_since(tb_last_stamp);
	tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;

	new_nsec -= tb_delta / tb_ticks_per_usec / 1000;

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
			     do_gtod.varp->tb_to_xs );

	new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC;
	new_xsec += new_sec * XSEC_PER_SEC;
	if ( new_xsec > delta_xsec ) {
		do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
		systemcfg->stamp_xsec = new_xsec - delta_xsec;
	}
	else {
		/* This is only for the case where the user is setting the time
		 * way back to a time such that the boot time would have been
		 * before 1970 ... eg. we booted ten days ago, and we are setting
		 * the time to Jan 5, 1970 */
		do_gtod.varp->stamp_xsec = new_xsec;
		do_gtod.varp->tb_orig_stamp = tb_last_stamp;
		systemcfg->stamp_xsec = new_xsec;
		systemcfg->tb_orig_stamp = tb_last_stamp;
	}

	systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
	systemcfg->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

void __init time_init(void)
{
	/* This function is only called on the boot processor */
	unsigned long flags;
	struct rtc_time tm;
	struct div_result res;
	unsigned long scale, shift;

	ppc_md.calibrate_decr();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
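	/*
	 * For example, a 512 MHz timebase gives 1e9/512e6 = 1.953125, which
	 * is >= 1.0, so it is shifted right once: scale then encodes
	 * 0.9765625 in 0.64 fixed point, shift is 1, and sched_clock()
	 * returns (tb * 0.9765625) * 2 nanoseconds.
	 */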
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		ppc_md.get_boot_time(&tm);

	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			      tm.tm_hour, tm.tm_min, tm.tm_sec);
	tb_last_stamp = get_tb();
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_stamp;
	get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy;
	do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;
	systemcfg->tb_orig_stamp = tb_last_stamp;
	systemcfg->tb_update_count = 0;
	systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
	systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
	systemcfg->tb_to_xs = tb_to_xs;

	time_freq = 0;

	xtime.tv_nsec = 0;
	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}

/*
 * After adjtimex is called, adjust the conversion of tb ticks
 * to microseconds to keep do_gettimeofday synchronized
 * with ntpd.
 *
 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
 * adjust the frequency.
 */

/* #define DEBUG_PPC_ADJTIMEX 1 */

void ppc_adjtimex(void)
{
	unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
		new_tb_to_xs, new_xsec, new_stamp_xsec;
	unsigned long tb_ticks_per_sec_delta;
	long delta_freq, ltemp;
	struct div_result divres;
	unsigned long flags;
	struct gettimeofday_vars * temp_varp;
	unsigned temp_idx;
	long singleshot_ppm = 0;

	/*
	 * Compute parts per million frequency adjustment to accomplish the
	 * time adjustment implied by time_offset to be applied over the
	 * elapsed time indicated by time_constant.  Use SHIFT_USEC to get
	 * it into the same units as time_freq.
	 */
	if ( time_offset < 0 ) {
		ltemp = -time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
		ltemp = -ltemp;
	}
	else {
		ltemp = time_offset;
		ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
		ltemp >>= SHIFT_KG + time_constant;
	}

	/* If there is a single shot time adjustment in progress */
	if ( time_adjust ) {
#ifdef DEBUG_PPC_ADJTIMEX
		printk("ppc_adjtimex: ");
		if ( adjusting_time == 0 )
			printk("starting ");
		printk("single shot time_adjust = %ld\n", time_adjust);
#endif

		adjusting_time = 1;

		/* Compute parts per million frequency adjustment to match time_adjust */
		singleshot_ppm = tickadj * HZ;
		/*
		 * The adjustment should be tickadj*HZ to match the code in
		 * linux/kernel/timer.c, but experiments show that this is too
		 * large. 3/4 of tickadj*HZ seems about right
		 */
		singleshot_ppm -= singleshot_ppm / 4;
		/* Use SHIFT_USEC to get it into the same units as time_freq */
		singleshot_ppm <<= SHIFT_USEC;
		if ( time_adjust < 0 )
			singleshot_ppm = -singleshot_ppm;
	}
	else {
#ifdef DEBUG_PPC_ADJTIMEX
		if ( adjusting_time )
			printk("ppc_adjtimex: ending single shot time_adjust\n");
#endif
		adjusting_time = 0;
	}

	/* Add up all of the frequency adjustments */
	delta_freq = time_freq + ltemp + singleshot_ppm;

	/* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */
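	/*
	 * delta_freq is in ppm scaled by 2^SHIFT_USEC; the shift by
	 * (SHIFT_USEC - 8) plus the 2^(SHIFT_USEC - 8) factor folded into
	 * den together divide by 2^SHIFT_USEC (assuming SHIFT_USEC == 16),
	 * so the net effect below is roughly
	 * tb_ticks_per_sec_delta = tb_ticks_per_sec * |ppm| / 10^6.
	 */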
	den = 1000000 * (1 << (SHIFT_USEC - 8));
	if ( delta_freq < 0 ) {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
	}
	else {
		tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
		new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
	}

#ifdef DEBUG_PPC_ADJTIMEX
	printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
	printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld  new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
#endif

	/*
	 * Compute a new value of tb_to_xs (used to convert tb to microseconds)
	 * and a new value of stamp_xsec which is the time (in 1/2^20 second
	 * units) corresponding to tb_orig_stamp.  This new value of stamp_xsec
	 * compensates for the change in frequency (implied by the new tb_to_xs)
	 * which guarantees that the current time remains the same.
	 */
	write_seqlock_irqsave( &xtime_lock, flags );
	tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
	div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres );
	new_tb_to_xs = divres.result_low;
	new_xsec = mulhdu( tb_ticks, new_tb_to_xs );

	old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs );
	new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;

	/*
	 * There are two copies of tb_to_xs and stamp_xsec so that no lock is
	 * needed to access and use these values in do_gettimeofday.  We
	 * alternate the copies and as long as a reasonable time elapses
	 * between changes, there will never be inconsistent values.  ntpd has
	 * a minimum of one minute between updates.
	 */

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->stamp_xsec = new_stamp_xsec;
	temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the problem state gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 */
	++(systemcfg->tb_update_count);
	smp_wmb();
	systemcfg->tb_to_xs = new_tb_to_xs;
	systemcfg->stamp_xsec = new_stamp_xsec;
	smp_wmb();
	++(systemcfg->tb_update_count);

	write_sequnlock_irqrestore( &xtime_lock, flags );

}


#define TICK_SIZE tick
#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
#define SECYR (SECDAY * 365)
#define leapyear(year) ((year) % 4 == 0)
#define days_in_year(a) (leapyear(a) ? 366 : 365)
#define days_in_month(a) (month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
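/*
 * GregorianDay() builds a day count in which 1 January of year 1
 * (proleptic Gregorian) is day 1; day % 7 then gives the weekday,
 * with 0 falling on Sunday as struct rtc_time expects.
 */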
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear/4 - lastYear/100 + lastYear/400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
	 */
	if ((tm->tm_year % 4 == 0) &&
	    ((tm->tm_year % 100 != 0) || (tm->tm_year % 400 == 0)) &&
	    (tm->tm_mon > 2)) {
		/*
		 * We are past Feb. 29 in a leap year
		 */
		day = 1;
	} else {
		day = 0;
	}

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
	       tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
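/*
 * The bit-by-bit loop below finds the largest mlt for which
 * mulhwu(inscale, mlt) stays below outscale, i.e. an approximation of
 * mlt = outscale * 2^32 / inscale, presumably so that mulhwu(v, mlt)
 * can later rescale a value v from inscale units to outscale units.
 */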
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt = 0, tmp, err;
	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */

	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt|tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */

	err = inscale * (mlt+1);
	if (err <= inscale/2)
		mlt++;
	return mlt;
}

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
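/*
 * This is schoolbook long division on the four 32-bit "digits" of the
 * dividend: each step divides (remainder_so_far << 32 | next_digit) by
 * the divisor, emitting one 32-bit quotient digit and carrying the
 * remainder into the next step.
 */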

void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
		   unsigned divisor, struct div_result *dr )
{
	unsigned long a,b,c,d, w,x,y,z, ra,rb,rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a/divisor;
	ra = (a - (w * divisor)) << 32;

	x = (ra + b)/divisor;
	rb = ((ra + b) - (x * divisor)) << 32;

	y = (rb + c)/divisor;
	rc = ((rb + c) - (y * divisor)) << 32;

	z = (rc + d)/divisor;

	dr->result_high = (w << 32) + x;
	dr->result_low = (y << 32) + z;

}