/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <linux/suspend.h>
#include <asm/trace.h>

#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/div64.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#include <asm/asm-prototypes.h>

/* powerpc clocksource/clockevent code */

#include <linux/timekeeper_internal.h>

static u64 rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name		= "rtc",
	.rating		= 400,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask		= CLOCKSOURCE_MASK(64),
	.read		= rtc_read,
};

static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name		= "timebase",
	.rating		= 400,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mask		= CLOCKSOURCE_MASK(64),
	.read		= timebase_read,
};

#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static int decrementer_shutdown(struct clock_event_device *evt);

struct clock_event_device decrementer_clockevent = {
	.name			= "decrementer",
	.rating			= 200,
	.irq			= 0,
	.set_next_event		= decrementer_set_next_event,
	.set_state_shutdown	= decrementer_shutdown,
	.tick_resume		= decrementer_shutdown,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_C3STOP,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
/* XSEC_PER_SEC is 2^20 xsec per second */
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

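/*
 * Editor's illustrative note (not in the original source): an "xsec"
 * is 1/XSEC_PER_SEC (i.e. 2^-20) of a second, so SCALE_XSEC() maps a
 * sub-second offset onto a 0..max range. For example, half a second:
 *
 *	SCALE_XSEC(XSEC_PER_SEC / 2, 1000) == 500
 *
 * The 32-bit variant computes the same value as
 * ((xsec << 12) * max) >> 32, since (xsec << 12) / 2^32 == xsec / 2^20.
 */
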
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

cputime_t cputime_one_jiffy;

#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
#endif

#ifdef CONFIG_PPC64
#define get_accounting(tsk)	(&get_paca()->accounting)
#else
#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
#endif

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}

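/*
 * Editor's illustrative sketch (not in the original source): a 0.64
 * fixed-point factor f = unit_rate * 2^64 / tb_ticks_per_sec converts
 * timebase ticks to that unit by taking the high 64 bits of the
 * 128-bit product, which is what mulhdu() returns:
 *
 *	static inline u64 ticks_to_usec_sketch(u64 ticks)
 *	{
 *		return mulhdu(ticks, __cputime_usec_factor);
 *	}
 *
 * e.g. with tb_ticks_per_sec == 512000000, a delta of 512 ticks gives
 * mulhdu(512, 1000000 * 2^64 / 512000000) == 1 microsecond.
 */
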
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static unsigned long read_spurr(unsigned long tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;
	u8 save_soft_enabled = local_paca->soft_enabled;
	struct cpu_accounting_data *acct = &local_paca->accounting;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->system_time -= sst;
	acct->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->accounting.system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static unsigned long vtime_delta(struct task_struct *tsk,
				 unsigned long *sys_scaled,
				 unsigned long *stolen)
{
	unsigned long now, nowscaled, deltascaled;
	unsigned long udelta, delta, user_scaled;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	nowscaled = read_spurr(now);
	acct->system_time += now - acct->starttime;
	acct->starttime = now;
	deltascaled = nowscaled - acct->startspurr;
	acct->startspurr = nowscaled;

	*stolen = calculate_stolen_time(now);

	delta = acct->system_time;
	acct->system_time = 0;
	udelta = acct->user_time - acct->utime_sspurr;
	acct->utime_sspurr = acct->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	*sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			*sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - *sys_scaled;
		} else {
			*sys_scaled = deltascaled;
		}
	}
	acct->user_time_scaled += user_scaled;

	return delta;
}

void vtime_account_system(struct task_struct *tsk)
{
	unsigned long delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_system_time(tsk, 0, delta);
	tsk->stimescaled += sys_scaled;

	if (stolen)
		account_steal_time(stolen);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	unsigned long delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_idle_time(delta + stolen);
}

/*
 * Transfer the user time accumulated in the paca
 * by the exception entry and exit code to the generic
 * process user time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t utime, utimescaled;
	struct cpu_accounting_data *acct = get_accounting(tsk);

	utime = acct->user_time;
	utimescaled = acct->user_time_scaled;
	acct->user_time = 0;
	acct->user_time_scaled = 0;
	acct->utime_sspurr = 0;
	account_user_time(tsk, utime);
	tsk->utimescaled += utimescaled;
}

#ifdef CONFIG_PPC32
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct cpu_accounting_data *acct = get_accounting(current);

	acct->starttime = get_accounting(prev)->starttime;
	acct->system_time = 0;
	acct->user_time = 0;
}
#endif /* CONFIG_PPC32 */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_PROFILING
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

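/*
 * Editor's note (not in the original source): arch_irq_work_raise()
 * forces an almost-immediate timer exception by programming the
 * decrementer with 1 after setting the pending flag; the flag is
 * then noticed in __timer_interrupt() below, which runs the queued
 * work via irq_work_run().
 */
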
static void __timer_interrupt(void)
{
	struct pt_regs *regs = get_irq_regs();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
	u64 now;

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__this_cpu_inc(irq_stat.timer_irqs_event);
	} else {
		now = *next_tb - now;
		if (now <= decrementer_max)
			set_dec(now);
		/* We may have raced with new irq work */
		if (test_irq_work_pending())
			set_dec(1);
		__this_cpu_inc(irq_stat.timer_irqs_others);
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);
}

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(decrementer_max);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these. We also need to set
	 * decrementers_next_tb as MAX to make sure __check_irq_replay
	 * doesn't replay the timer interrupt on return, otherwise we'll
	 * trap here infinitely.
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	__timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(decrementer_max);
	local_irq_disable();
	set_dec(decrementer_max);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

unsigned long long tb_to_ns(unsigned long long ticks)
{
	return mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift;
}
EXPORT_SYMBOL_GPL(tb_to_ns);

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

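/*
 * Editor's illustrative note (not in the original source): time_init()
 * below picks tb_to_ns_scale and tb_to_ns_shift so that
 * scale * 2^shift == 1e9 * 2^64 / tb_ticks_per_sec, which makes the
 * expression above equal to ticks * 1e9 / tb_ticks_per_sec without a
 * runtime division. For a 512 MHz timebase, 512 ticks yield
 * mulhdu(512, scale) << shift == 1000 ns, as expected.
 */
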
#ifdef CONFIG_PPC_PSERIES

/*
 * Running clock - attempts to give a view of time passing for a virtualised
 * kernel.
 * Uses the VTB register if available otherwise a next best guess.
 */
unsigned long long running_clock(void)
{
	/*
	 * Don't read the VTB as a host since KVM does not switch in host
	 * timebase into the VTB when it takes a guest off the CPU, reading the
	 * VTB would result in reading 'last switched out' guest VTB.
	 *
	 * Host kernels are often compiled with CONFIG_PPC_PSERIES checked, it
	 * would be unsafe to rely only on the #ifdef above.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return mulhdu(get_vtb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;

	/*
	 * This is a next best approximation without a VTB.
	 * On a host which is running bare metal there should never be any stolen
	 * time and on a host which doesn't do any virtualisation TB *should* equal
	 * VTB so it makes no difference anyway.
	 */
	return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]);
}
#endif

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static u64 rtc_read(struct clocksource *cs)
{
	return (u64)get_rtc();
}

static u64 timebase_read(struct clocksource *cs)
{
	return (u64)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
			 struct clocksource *clock, u32 mult, u64 cycle_last)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables. It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

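/*
 * Editor's illustrative sketch (not in the original source): the
 * userspace (vDSO) reader side of the tb_update_count protocol is
 * roughly this seqcount-style loop; names are hypothetical:
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, tb_to_xs, stamp_xsec ...
 *		smp_rmb();
 *	} while (seq != vdso_data->tb_update_count || (seq & 1));
 *
 * An odd count means an update is in flight, so the reader retries.
 */
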
void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);

	/* We may have raced with new irq work */
	if (test_irq_work_pending())
		set_dec(1);

	return 0;
}

static int decrementer_shutdown(struct clock_event_device *dev)
{
	decrementer_set_next_event(decrementer_max, dev);
	return 0;
}

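/*
 * Editor's note (not in the original source): the decrementer cannot
 * actually be stopped, so "shutdown" is implemented by programming
 * the longest possible timeout, decrementer_max ticks into the future.
 */
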
/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	*next_tb = get_tb_or_rtc();
	__timer_interrupt();
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void enable_large_decrementer(void)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
		return;

	/*
	 * If we're running as the hypervisor we need to enable the LD manually
	 * otherwise firmware should have done it for us.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
}

static void __init set_decrementer_max(void)
{
	struct device_node *cpu;
	u32 bits = 32;

	/* Prior to ISAv3 the decrementer is always 32 bit */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return;

	cpu = of_find_node_by_type(NULL, "cpu");

	if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
		if (bits > 64 || bits < 32) {
			pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
			bits = 32;
		}

		/* calculate the signed maximum given this many bits */
		decrementer_max = (1ul << (bits - 1)) - 1;
	}

	of_node_put(cpu);

	pr_info("time_init: %u bit decrementer (max: %llx)\n",
		bits, decrementer_max);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
	/* Enable and test the large decrementer for this cpu */
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move snapshot_timebase
	 * call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* initialise and enable the large decrementer (if we have one) */
	set_decrementer_max();
	enable_large_decrementer();

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
	tick_setup_hrtimer_broadcast();

#ifdef CONFIG_COMMON_CLK
	of_clk_init(NULL);
#endif
}

#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * No-one uses the day of the week.
	 */
	tm->tm_wday = -1;
}
EXPORT_SYMBOL(to_tm);

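/*
 * Editor's illustrative note (not in the original source): to_tm()
 * fills tm_year with the full year (e.g. 2017, not years-since-1900)
 * and tm_mon as 1..12, which is why update_persistent_clock() above
 * subtracts 1900 and 1 before handing the result to the RTC. For
 * example, to_tm(86400, &tm) yields 1970-01-02 00:00:00.
 */
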
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}

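/*
 * Editor's illustrative sketch (not in the original source): the long
 * division above runs 32 bits at a time, with each do_div() leaving
 * the quotient limb in place and returning the remainder that is
 * carried into the next limb. Typical use, as in time_init(),
 * computes a 64.64 fixed-point ratio:
 *
 *	struct div_result res;
 *
 *	// res = 1e9 * 2^64 / tb_ticks_per_sec as a 128-bit value;
 *	// result_high is the integer part, result_low the fraction.
 *	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
 */
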
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) uses loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	ppc_md.get_rtc_time(tm);
	return rtc_valid_tm(tm);
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	if (!ppc_md.set_rtc_time)
		return -EOPNOTSUPP;

	if (ppc_md.set_rtc_time(tm) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}

device_initcall(rtc_init);
#endif