/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601	(1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

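/*
 * Both SCALE_XSEC variants compute xsec * max / 2^20; an xsec is
 * 1/2^20 of a second.  Worked check of the 32-bit form: (xsec << 12)
 * scales by 2^12, and mulhwu() keeps the high 32 bits of the 32x32-bit
 * product, dividing by 2^32; the net effect is
 * xsec * max * 2^12 / 2^32 = xsec * max / 2^20, matching the 64-bit
 * form without a runtime divide.
 */
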
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;	/* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
u64 tb_to_xs;				/* set in time_init(), used below */
unsigned tb_to_us;

#define TICKLEN_SCALE	TICK_LENGTH_SHIFT
u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;	/* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

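/*
 * Rough check of that claim (illustrative): last_tick_len is about
 * (1e9 / HZ) * 2^TICKLEN_SCALE, 1e9 is close to 2^30, and HZ is close
 * to 2^SHIFT_HZ.  Multiplying by 2^TICKLEN_SHIFT contributes
 * 2^(63 - 30 - TICKLEN_SCALE + SHIFT_HZ); the 2^30/HZ and the
 * 2^TICKLEN_SCALE factors cancel, leaving about 2^63.
 */
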
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000, 0, tb_ticks_per_sec, &res);
	__cputime_msec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}

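/*
 * Illustrative note: div128_by_32(n, 0, tb_ticks_per_sec, &res)
 * divides n * 2^64 by tb_ticks_per_sec, so res.result_low is
 * n / tb_ticks_per_sec as a 0.64 fixed-point fraction.  A conversion
 * then needs only a high multiply, e.g.
 * jiffies = (ticks * __cputime_jiffies_factor) >> 64, which mulhdu()
 * performs without any runtime division.
 */
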
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
	u64 now, delta;
	unsigned long flags;

	local_irq_save(flags);
	now = read_purr();
	delta = now - get_paca()->startpurr;
	get_paca()->startpurr = now;
	if (!in_interrupt()) {
		delta += get_paca()->system_time;
		get_paca()->system_time = 0;
	}
	account_system_time(tsk, 0, delta);
	local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
	cputime_t utime;

	utime = get_paca()->user_time;
	get_paca()->user_time = 0;
	account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	account_process_vtime(current);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_mode(regs));
	scheduler_tick();
	run_posix_cpu_timers(current);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
	int	initialized;		/* thread is running */
	u64	tb;			/* last TB value read */
	u64	purr;			/* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
	unsigned long flags;
	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

	local_irq_save(flags);
	p->tb = mftb();
	p->purr = mfspr(SPRN_PURR);
	wmb();
	p->initialized = 1;
	local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
	u64 tb, purr;
	s64 stolen;
	struct cpu_purr_data *pme;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	if (!pme->initialized)
		return;		/* this can happen in early boot */
	tb = mftb();
	purr = mfspr(SPRN_PURR);
	stolen = (tb - pme->tb) - (purr - pme->purr);
	if (stolen > 0)
		account_steal_time(current, stolen);
	pme->tb = tb;
	pme->purr = purr;
}

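/*
 * Why the subtraction above yields stolen time: the timebase (TB)
 * advances at a constant rate in wall-clock time, while the PURR only
 * advances while this partition's thread is actually dispatched.  Over
 * an interval, (tb - pme->tb) is total elapsed time and
 * (purr - pme->purr) is time we really ran, so the difference is time
 * consumed by the hypervisor or the other SMT thread.
 */
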
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
	struct cpu_purr_data *pme;
	unsigned long flags;

	if (!cpu_has_feature(CPU_FTR_PURR))
		return;
	local_irq_save(flags);
	pme = &per_cpu(cpu_purr_data, smp_processor_id());
	pme->tb = mftb();
	pme->purr = mfspr(SPRN_PURR);
	pme->initialized = 1;
	local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)	update_process_times(user_mode(regs))
#define calculate_steal_time()		do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()			do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
	__get_cpu_var(last_jiffy) = get_tb();
	snapshot_purr();
}

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			;
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

static __inline__ void timer_check_rtc(void)
{
	/*
	 * Update the rtc when needed; this should be performed on the
	 * right fraction of a second.  Half or full second?
	 * Full second works on mk48t59 clocks, others need testing.
	 * Note that this update is basically only used through
	 * the adjtimex system calls.  Setting the HW clock in
	 * any other way is a /dev/rtc and userland business.
	 * This is still wrong by -0.5/+1.5 jiffies because of the
	 * timer interrupt resolution and possible delay, but here we
	 * hit a quantization limit which can only be solved by higher
	 * resolution timers and decoupling time management from timer
	 * interrupts.  This is also wrong on the clocks
	 * which require being written at the half second boundary.
	 * We should have an rtc call that only sets the minutes and
	 * seconds like on Intel to avoid problems with non-UTC clocks.
	 */
	if (ppc_md.set_rtc_time && ntp_synced() &&
	    xtime.tv_sec - last_rtc_update >= 659 &&
	    abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
		struct rtc_time tm;

		to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
		tm.tm_year -= 1900;
		tm.tm_mon -= 1;
		if (ppc_md.set_rtc_time(&tm) == 0)
			last_rtc_update = xtime.tv_sec + 1;
		else
			/* Try again one minute later */
			last_rtc_update += 60;
	}
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
	unsigned long sec, usec;
	u64 tb_ticks, xsec;
	struct gettimeofday_vars *temp_varp;
	u64 temp_tb_to_xs, temp_stamp_xsec;

	/*
	 * These calculations are faster (they get rid of divides)
	 * if done in units of 1/2^20 rather than microseconds.
	 * The conversion to microseconds at the end is done
	 * without a divide (and in fact, without a multiply).
	 */
	temp_varp = do_gtod.varp;

	/* Sampling the time base must be done after loading
	 * do_gtod.varp in order to avoid racing with update_gtod.
	 */
	data_barrier(temp_varp);
	tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
	temp_tb_to_xs = temp_varp->tb_to_xs;
	temp_stamp_xsec = temp_varp->stamp_xsec;
	xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
	sec = xsec / XSEC_PER_SEC;
	usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
	usec = SCALE_XSEC(usec, 1000000);

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

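/*
 * Worked sketch of the path above: tb_to_xs is a 0.64 fixed-point
 * fraction, so mulhdu(tb_ticks, tb_to_xs) = (tb_ticks * tb_to_xs) >> 64
 * converts elapsed timebase ticks to xsec.  With XSEC_PER_SEC = 2^20,
 * the divide and the mask are just a shift and an AND, and
 * SCALE_XSEC(usec, 1000000) turns the fractional-second xsec count
 * into microseconds, again without a runtime divide.
 */
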
void do_gettimeofday(struct timeval *tv)
{
	if (__USE_RTC()) {
		/* do this the old way */
		unsigned long flags, seq;
		unsigned int sec, nsec, usec;

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
		usec = nsec / 1000;
		while (usec >= 1000000) {
			usec -= 1000000;
			++sec;
		}
		tv->tv_sec = sec;
		tv->tv_usec = usec;
		return;
	}
	__do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
			       u64 new_tb_to_xs)
{
	unsigned temp_idx;
	struct gettimeofday_vars *temp_varp;

	temp_idx = (do_gtod.var_idx == 0);
	temp_varp = &do_gtod.vars[temp_idx];

	temp_varp->tb_to_xs = new_tb_to_xs;
	temp_varp->tb_orig_stamp = new_tb_stamp;
	temp_varp->stamp_xsec = new_stamp_xsec;
	smp_mb();
	do_gtod.varp = temp_varp;
	do_gtod.var_idx = temp_idx;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = new_tb_stamp;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

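/*
 * Sketch of the read-side protocol described above (illustrative only,
 * not the actual vdso assembly):
 *
 *	do {
 *		count = vdso_data->tb_update_count;
 *		smp_rmb();
 *		stamp = vdso_data->tb_orig_stamp;
 *		xsec  = vdso_data->stamp_xsec;
 *		t2x   = vdso_data->tb_to_xs;
 *		smp_rmb();
 *	} while ((count & 1) || count != vdso_data->tb_update_count);
 *
 * An odd count means an update is in progress; a changed count means
 * the reader raced with one.  Either way it simply retries.
 */
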
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32 bits number.  This is a requirement of our fast 32 bits userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fall back to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
	unsigned long offset;
	u64 new_stamp_xsec;
	u64 tlen, t2x;
	u64 tb, xsec_old, xsec_new;
	struct gettimeofday_vars *varp;

	if (__USE_RTC())
		return;
	tlen = current_tick_length();
	offset = cur_tb - do_gtod.varp->tb_orig_stamp;
	if (tlen == last_tick_len && offset < 0x80000000u)
		return;
	if (tlen != last_tick_len) {
		t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
		last_tick_len = tlen;
	} else
		t2x = do_gtod.varp->tb_to_xs;
	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Make sure time doesn't go backwards for userspace gettimeofday.
	 */
	tb = get_tb();
	varp = do_gtod.varp;
	xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
		+ varp->stamp_xsec;
	xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
	if (xsec_new < xsec_old)
		new_stamp_xsec += xsec_old - xsec_new;

	update_gtod(cur_tb, new_stamp_xsec, t2x);
}

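/*
 * Note on the monotonicity fix-up above: xsec_old is what userspace
 * would compute right now with the old (tb_orig_stamp, stamp_xsec,
 * tb_to_xs) triple, and xsec_new is what it would compute with the new
 * one.  If the new triple would read earlier than the old, the whole
 * new timeline is shifted forward by the difference, so a gettimeofday
 * call straddling the update can never see time move backwards.
 */
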
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	/* Make sure we only run on iSeries */
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return -ENODEV;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if ( iSeries_recal_titan ) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
		unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';

		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if ( tick_diff < 0 ) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if ( tick_diff ) {
			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
						new_tb_ticks_per_jiffy, sign, tick_diff );
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec   = new_tb_ticks_per_sec;
				calc_cputime_factors();
				div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
				vdso_data->tb_to_xs = tb_to_xs;
			}
			else {
				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
					"                   new tb_ticks_per_jiffy = %lu\n"
					"                   old tb_ticks_per_jiffy = %lu\n",
					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;

	return 0;
}
late_initcall(iSeries_tb_recal);

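/*
 * Unit note (inferred from the code above): the Titan value advances
 * 2^12 counts per microsecond, hence the >> 12 to obtain titan_usec.
 * The new frequency is elapsed timebase ticks scaled by
 * USEC_PER_SEC / titan_usec, and the result is only accepted when it
 * differs from the old value by less than tb_ticks_per_jiffy/25 (4%).
 */
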
/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (Almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed.)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	int next_dec;
	int cpu = smp_processor_id();
	unsigned long ticks;
	u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	profile_tick(CPU_PROFILING);
	calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		get_lppaca()->int_dword.fields.decr_int = 0;
#endif

	while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
	       >= tb_ticks_per_jiffy) {
		/* Update last_jiffy */
		per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
		/* Handle RTCL overflow on 601 */
		if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
			per_cpu(last_jiffy, cpu) -= 1000000000;

		/*
		 * We cannot disable the decrementer, so in the period
		 * between this cpu's being marked offline in cpu_online_map
		 * and calling stop-self, it is taking timer interrupts.
		 * Avoid calling into the scheduler rebalancing code if this
		 * is the case.
		 */
		if (!cpu_is_offline(cpu))
			account_process_time(regs);

		/*
		 * No need to check whether cpu is offline here; boot_cpuid
		 * should have been fixed up by now.
		 */
		if (cpu != boot_cpuid)
			continue;

		write_seqlock(&xtime_lock);
		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
			tb_last_jiffy = tb_next_jiffy;
			do_timer(1);
			timer_recalc_offset(tb_last_jiffy);
			timer_check_rtc();
		}
		write_sequnlock(&xtime_lock);
	}

	next_dec = tb_ticks_per_jiffy - ticks;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
		process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	irq_exit();
	set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
	unsigned long ticks;

	/*
	 * The timebase gets saved on sleep and restored on wakeup,
	 * so all we need to do is to reset the decrementer.
	 */
	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
	if (ticks < tb_ticks_per_jiffy)
		ticks = tb_ticks_per_jiffy - ticks;
	else
		ticks = 1;
	set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
	previous_tb -= tb_ticks_per_jiffy;

	for_each_possible_cpu(i) {
		if (i == boot_cpuid)
			continue;
		per_cpu(last_jiffy, i) = previous_tb;
	}
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

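/*
 * Worked example (illustrative numbers): with a 512 MHz timebase,
 * one tick is 1e9 / 512e6 = 1.953125 ns, so time_init() computes
 * 1e9 * 2^64 / 512e6, about 1.95 * 2^64 as a 64.64 fixed-point value.
 * That is >= 2^64, so it is shifted right once, giving
 * tb_to_ns_scale of about 0.977 * 2^64 with tb_to_ns_shift = 1; then
 * mulhdu(ticks, scale) << shift equals ticks * 1.953125 ns as required.
 */
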
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, new_sec = tv->tv_sec;
	long wtm_nsec, new_nsec = tv->tv_nsec;
	unsigned long flags;
	u64 new_xsec;
	unsigned long tb_delta;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/*
	 * Updating the RTC is not the job of this code.  If the time is
	 * stepped under NTP, the RTC will be updated after STA_UNSYNC
	 * is cleared.  Tools like clock/hwclock either copy the RTC
	 * to the system time, in which case there is no point in writing
	 * to the RTC again, or write to the RTC but then they don't call
	 * settimeofday to perform this operation.
	 */

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/*
	 * Subtract off the number of nanoseconds since the
	 * beginning of the last tick.
	 */
	tb_delta = tb_ticks_since(tb_last_jiffy);
	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

	set_normalized_timespec(&xtime, new_sec, new_nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	/* In case of a large backwards jump in time with NTP, we want the
	 * clock to be updated as soon as the PLL is again in lock.
	 */
	last_rtc_update = new_sec - 658;

	ntp_clear();

	new_xsec = xtime.tv_nsec;
	if (new_xsec != 0) {
		new_xsec *= XSEC_PER_SEC;
		do_div(new_xsec, NSEC_PER_SEC);
	}
	new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
	update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;

	write_sequnlock_irqrestore(&xtime_lock, flags);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const unsigned int *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}

#ifdef CONFIG_BOOKE
	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
}

unsigned long get_boot_time(void)
{
	struct rtc_time tm;

	if (ppc_md.get_boot_time)
		return ppc_md.get_boot_time();
	if (!ppc_md.get_rtc_time)
		return 0;
	ppc_md.get_rtc_time(&tm);
	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	unsigned long flags;
	unsigned long tm = 0;
	struct div_result res;
	u64 scale, x;
	unsigned shift;

	if (ppc_md.time_init != NULL)
		timezone_offset = ppc_md.time_init();

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
		tb_last_jiffy = get_rtcl();
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
		tb_last_jiffy = get_tb();
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	calc_cputime_factors();

	/*
	 * Calculate the length of each tick in ns.  It will not be
	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
	 * rounded up.
	 */
	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
	do_div(x, ppc_tb_freq);
	tick_nsec = x;
	last_tick_len = x << TICKLEN_SCALE;

	/*
	 * Compute ticklen_to_xs, which is a factor which gets multiplied
	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
	 * It is computed as:
	 *	ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 * which turns out to be N = 51 - SHIFT_HZ.
	 * This gives the result as a 0.64 fixed-point fraction.
	 * That value is reduced by an offset amounting to 1 xsec per
	 * 2^31 timebase ticks to avoid problems with time going backwards
	 * by 1 xsec when we do timer_recalc_offset due to losing the
	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
	 * since there are 2^20 xsec in a second.
	 */
	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
	ticklen_to_xs = res.result_low;
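	/*
	 * Sanity check of the N above (illustrative): TICKLEN_SHIFT is
	 * 63 - 30 - TICKLEN_SCALE + SHIFT_HZ, so
	 *	N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
	 *	  = 84 - TICKLEN_SCALE - 63 + 30 + TICKLEN_SCALE - SHIFT_HZ
	 *	  = 51 - SHIFT_HZ.
	 * The 2^51 in the dividend combined with the << SHIFT_HZ in the
	 * divisor is what produces the 2^(51 - SHIFT_HZ) = 2^N numerator.
	 */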

	/* Compute tb_to_xs from tick_nsec */
	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;

	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb();

	tm = get_boot_time();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
		tm -= timezone_offset;
	}

	xtime.tv_sec = tm;
	xtime.tv_nsec = 0;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
	__get_cpu_var(last_jiffy) = tb_last_jiffy;
	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;

	vdso_data->tb_orig_stamp = tb_last_jiffy;
	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
	vdso_data->tb_to_xs = tb_to_xs;

	last_rtc_update = xtime.tv_sec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}

#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define	leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define	days_in_year(a)		(leapyear(a) ? 366 : 365)
#define	days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

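/*
 * Quick check of the leapyear() rule (illustrative): leapyear(1996)
 * and leapyear(2000) are both 1 (divisible by 4, and 2000 also by
 * 400), while leapyear(1900) is 0 (divisible by 100 but not by 400).
 */
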
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
	       tm->tm_mday;

	tm->tm_wday = day % 7;
}

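/*
 * Worked example (illustrative): for 1 Jan 1970, lastYear = 1969,
 * leapsToDate = 1969/4 - 1969/100 + 1969/400 = 492 - 19 + 4 = 477, and
 * day = 0 + 1969*365 + 477 + 0 + 1 = 719163; 719163 % 7 = 4, i.e.
 * Thursday, which is indeed the weekday of the Unix epoch.
 */
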
void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

/* Auxiliary function to compute scaling factors */

/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is
 * quite nice.  It makes this computation very precise (27-28 bits
 * typically) which is optimistic considering the stability of most
 * processor clock oscillators and the precision with which the
 * timebase frequency is measured, but it does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
	unsigned mlt = 0, tmp, err;

	/* No concern for performance, it's done once: use a stupid
	 * but safe and compact method to find the multiplier.
	 */
	for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
		if (mulhwu(inscale, mlt | tmp) < outscale)
			mlt |= tmp;
	}

	/* We might still be off by 1 for the best approximation.
	 * A side effect of this is that if outscale is too large
	 * the returned value will be zero.
	 * Many corner cases have been checked and seem to work,
	 * some might have been forgotten in the test however.
	 */
	err = inscale * (mlt + 1);
	if (err <= inscale / 2)
		mlt++;
	return mlt;
}

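/*
 * Illustrative note: the loop is a bitwise binary search for the
 * largest mlt with mulhwu(inscale, mlt) < outscale, i.e.
 * mlt ~= outscale * 2^32 / inscale.  For example,
 * mulhwu_scale_factor(ppc_tb_freq, 1000000) yields the tb_to_us
 * factor, so that mulhwu(ticks, tb_to_us) converts timebase ticks to
 * microseconds with a single high multiply.
 */
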
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}
