arch/powerpc/kernel/time.c (mirror_ubuntu-bionic-kernel.git)
powerpc/time: Remove unnecessary sanity check of decrementer expiration
/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   non ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
        .name         = "rtc",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
        .name         = "timebase",
        .rating       = 400,
        .flags        = CLOCK_SOURCE_IS_CONTINUOUS,
        .mask         = CLOCKSOURCE_MASK(64),
        .read         = timebase_read,
};

#define DECREMENTER_MAX 0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev);

static struct clock_event_device decrementer_clockevent = {
        .name           = "decrementer",
        .rating         = 200,
        .irq            = 0,
        .set_next_event = decrementer_set_next_event,
        .set_mode       = decrementer_set_mode,
        .features       = CLOCK_EVT_FEAT_ONESHOT,
};

struct decrementer_clock {
        struct clock_event_device event;
        u64 next_tb;
};

static DEFINE_PER_CPU(struct decrementer_clock, decrementers);

#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;

/* Forward declaration is only needed for iSeries compiles */
static void __init clocksource_init(void);
#endif

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif
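
/*
 * Editorial worked example (not in the original source): an "xsec" is
 * 1/2^20 of a second, so XSEC_PER_SEC == 1024*1024 == 2^20.  Taking
 * max == 2^32, SCALE_XSEC(524288, max) == (2^19 * 2^32) / 2^20 == 2^31:
 * half a second maps to half of the 32-bit range, as expected when
 * treating the result as a 0.32 fixed-point fraction of "max".
 */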

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;  /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
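
/*
 * Editorial sketch (not part of the original file): each factor above
 * is unit_rate * 2^64 / tb_ticks_per_sec, stored as a 0.64 fixed-point
 * fraction, so a conversion is just the high 64 bits of a 64x64-bit
 * multiply.  Assuming the mulhdu() helper that sched_clock() below
 * also uses, a ticks-to-milliseconds conversion would look like this
 * (illustrative only, not compiled):
 */
#if 0
static inline u64 ticks_to_msecs(u64 ticks)
{
        /* (ticks * (1000 * 2^64 / tb_ticks_per_sec)) >> 64 */
        return mulhdu(ticks, __cputime_msec_factor);
}
#endif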

/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
        if (cpu_has_feature(CPU_FTR_SPURR))
                return mfspr(SPRN_SPURR);
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
        u64 i = local_paca->dtl_ridx;
        struct dtl_entry *dtl = local_paca->dtl_curr;
        struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
        struct lppaca *vpa = local_paca->lppaca_ptr;
        u64 tb_delta;
        u64 stolen = 0;
        u64 dtb;

        if (!dtl)
                return 0;

        if (i == vpa->dtl_idx)
                return 0;
        while (i < vpa->dtl_idx) {
                if (dtl_consumer)
                        dtl_consumer(dtl, i);
                dtb = dtl->timebase;
                tb_delta = dtl->enqueue_to_dispatch_time +
                        dtl->ready_to_enqueue_time;
                barrier();
                if (i + N_DISPATCH_LOG < vpa->dtl_idx) {
                        /* buffer has overflowed */
                        i = vpa->dtl_idx - N_DISPATCH_LOG;
                        dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
                        continue;
                }
                if (dtb > stop_tb)
                        break;
                stolen += tb_delta;
                ++i;
                ++dtl;
                if (dtl == dtl_end)
                        dtl = local_paca->dispatch_log;
        }
        local_paca->dtl_ridx = i;
        local_paca->dtl_curr = dtl;
        return stolen;
}
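
/*
 * Editorial note: the overflow test above relies on the dispatch trace
 * log being a ring of N_DISPATCH_LOG entries while vpa->dtl_idx counts
 * every entry the hypervisor has ever written.  To illustrate with a
 * hypothetical N_DISPATCH_LOG of 48: if we last read entry i == 10 but
 * dtl_idx has reached 70, entries 10..21 have already been overwritten,
 * so the scan restarts at i = 70 - 48 = 22, the oldest surviving entry.
 */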

/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
        u64 sst, ust;

        u8 save_soft_enabled = local_paca->soft_enabled;
        u8 save_hard_enabled = local_paca->hard_enabled;

        /* We are called early in the exception entry, before
         * soft/hard_enabled are sync'ed to the expected state
         * for the exception. We are hard disabled but the PACA
         * needs to reflect that so various debug stuff doesn't
         * complain
         */
        local_paca->soft_enabled = 0;
        local_paca->hard_enabled = 0;

        sst = scan_dispatch_log(local_paca->starttime_user);
        ust = scan_dispatch_log(local_paca->starttime);
        local_paca->system_time -= sst;
        local_paca->user_time -= ust;
        local_paca->stolen_time += ust + sst;

        local_paca->soft_enabled = save_soft_enabled;
        local_paca->hard_enabled = save_hard_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
        u64 stolen = 0;

        if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) {
                stolen = scan_dispatch_log(stop_tb);
                get_paca()->system_time -= stolen;
        }

        stolen += get_paca()->stolen_time;
        get_paca()->stolen_time = 0;
        return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
        return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, nowscaled, delta, deltascaled;
        unsigned long flags;
        u64 stolen, udelta, sys_scaled, user_scaled;

        local_irq_save(flags);
        now = mftb();
        nowscaled = read_spurr(now);
        get_paca()->system_time += now - get_paca()->starttime;
        get_paca()->starttime = now;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startspurr = nowscaled;

        stolen = calculate_stolen_time(now);

        delta = get_paca()->system_time;
        get_paca()->system_time = 0;
        udelta = get_paca()->user_time - get_paca()->utime_sspurr;
        get_paca()->utime_sspurr = get_paca()->user_time;

        /*
         * Because we don't read the SPURR on every kernel entry/exit,
         * deltascaled includes both user and system SPURR ticks.
         * Apportion these ticks to system SPURR ticks and user
         * SPURR ticks in the same ratio as the system time (delta)
         * and user time (udelta) values obtained from the timebase
         * over the same interval. The system ticks get accounted here;
         * the user ticks get saved up in paca->user_time_scaled to be
         * used by account_process_tick.
         */
        sys_scaled = delta;
        user_scaled = udelta;
        if (deltascaled != delta + udelta) {
                if (udelta) {
                        sys_scaled = deltascaled * delta / (delta + udelta);
                        user_scaled = deltascaled - sys_scaled;
                } else {
                        sys_scaled = deltascaled;
                }
        }
        get_paca()->user_time_scaled += user_scaled;

        if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
                account_system_time(tsk, 0, delta, sys_scaled);
                if (stolen)
                        account_steal_time(stolen);
        } else {
                account_idle_time(delta + stolen);
        }
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
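
/*
 * Editorial worked example for the apportioning above: suppose the
 * timebase measured delta = 300 ticks of system time and udelta = 100
 * ticks of user time, but the SPURR only advanced by deltascaled = 200
 * (the thread ran at half speed).  Then
 *
 *      sys_scaled  = 200 * 300 / (300 + 100) = 150
 *      user_scaled = 200 - 150               = 50
 *
 * which preserves the 3:1 system-to-user ratio within the scaled total.
 */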

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 * Assumes that account_system_vtime() has been called recently
 * (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
        cputime_t utime, utimescaled;

        utime = get_paca()->user_time;
        utimescaled = get_paca()->user_time_scaled;
        get_paca()->user_time = 0;
        get_paca()->user_time_scaled = 0;
        get_paca()->utime_sspurr = 0;
        account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip. The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC) / titan_usec;
                unsigned long new_tb_ticks_per_jiffy =
                        DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';
                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy / 25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                setup_cputime_one_jiffy();
                        } else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       " new tb_ticks_per_jiffy = %lu\n"
                                       " old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        /* Called here now that we know accurate values for the timebase */
        clocksource_init();
        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
                     : "=r" (x)
                     : "i" (offsetof(struct paca_struct, irq_work_pending)));
        return x;
}

static inline void set_irq_work_pending_flag(void)
{
        asm volatile("stb %0,%1(13)" : :
                     "r" (1),
                     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                     "r" (0),
                     "i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()     __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()         __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()        __get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
        preempt_disable();
        set_irq_work_pending_flag();
        set_dec(1);
        preempt_enable();
}
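
/*
 * Editorial note: set_dec(1) above makes the decrementer fire almost
 * immediately, so the pending work is picked up by the
 * test_irq_work_pending()/irq_work_run() pair near the top of
 * timer_interrupt() below.  The preempt_disable()/preempt_enable()
 * bracket keeps us on the CPU whose flag we just set, so the
 * decrementer we poke is the one that will observe it.
 */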

#else /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()         0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer. We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer. (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
        struct clock_event_device *evt = &decrementer->event;

        /* Ensure a positive value is written to the decrementer, or else
         * some CPUs will continue to take decrementer exceptions.
         */
        set_dec(DECREMENTER_MAX);

        /* Some implementations of hotplug will get timer interrupts while
         * offline, just ignore these
         */
        if (!cpu_online(smp_processor_id()))
                return;

        trace_timer_interrupt_entry(regs);

        __get_cpu_var(irq_stat).timer_irqs++;

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        decrementer->next_tb = ~(u64)0;
        if (evt->event_handler)
                evt->event_handler(evt);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);

        trace_timer_interrupt_exit(regs);
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
        /* Disable the decrementer, so that it doesn't interfere
         * with suspending.
         */

        set_dec(0x7fffffff);
        local_irq_disable();
        set_dec(0x7fffffff);
}

static void generic_suspend_enable_irqs(void)
{
        local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
        if (ppc_md.suspend_disable_irqs)
                ppc_md.suspend_disable_irqs();
        generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
        generic_suspend_enable_irqs();
        if (ppc_md.suspend_enable_irqs)
                ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
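
/*
 * Editorial worked example: with a 512 MHz timebase, time_init() below
 * computes the 64.64 fixed-point factor 10^9 / (512 * 10^6) = 1.953125.
 * Since that is >= 1.0 it is halved once, giving tb_to_ns_shift = 1 and
 * tb_to_ns_scale = 0.9765625 * 2^64.  A delta of 512 ticks (one
 * microsecond) then yields mulhdu(512, tb_to_ns_scale) << 1
 * = 500 << 1 = 1000 ns, as expected.
 */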

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

/* should become __cpuinit when secondary_cpu_time_init also is */
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                       "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                       "(not found)\n");
        }
}

int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
        struct rtc_time tm;
        static int first = 1;

        ts->tv_nsec = 0;
        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time) {
                        ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
                        return;
                }
        }
        if (!ppc_md.get_rtc_time) {
                ts->tv_sec = 0;
                return;
        }
        ppc_md.get_rtc_time(&tm);

        ts->tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                            tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
        __read_persistent_clock(ts);

        /* Sanitize it in case real time clock is set below EPOCH */
        if (ts->tv_sec < 0) {
                ts->tv_sec = 0;
                ts->tv_nsec = 0;
        }
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
        return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
        return (cycle_t)get_tb();
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                     struct clocksource *clock, u32 mult)
{
        u64 new_tb_to_xs, new_stamp_xsec;
        u32 frac_sec;

        if (clock != &clocksource_timebase)
                return;

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /* 19342813113834067 ~= 2^(20+64) / 1e9 */
        new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
        new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

        BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
        /* this is tv_nsec / 1e9 as a 0.32 fraction */
        frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables. It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent. If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = clock->cycle_last;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wtm->tv_sec;
        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
        vdso_data->stamp_xtime = *wall_time;
        vdso_data->stamp_sec_fraction = frac_sec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
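
/*
 * Editorial sketch (not part of the original file): the protocol in
 * the comment above is the classic sequence-count pattern.  A reader
 * of the vDSO data page would follow it roughly like this (field names
 * as used in this file; the helper itself is illustrative only):
 */
#if 0
static u64 vdso_read_stamp_xsec(const struct vdso_data *vd)
{
        u32 seq;
        u64 stamp;

        do {
                seq = vd->tb_update_count;      /* first read of the count */
                smp_rmb();
                stamp = vd->stamp_xsec;         /* read the protected data */
                smp_rmb();
                /* retry if an update was in flight (odd) or completed since */
        } while ((seq & 1) || seq != vd->tb_update_count);

        return stamp;
}
#endif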

void update_vsyscall_tz(void)
{
        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();
        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
        smp_mb();
        ++vdso_data->tb_update_count;
}

static void __init clocksource_init(void)
{
        struct clocksource *clock;

        if (__USE_RTC())
                clock = &clocksource_rtc;
        else
                clock = &clocksource_timebase;

        if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
                printk(KERN_ERR "clocksource: %s is already registered\n",
                       clock->name);
                return;
        }

        printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
               clock->name, clock->mult, clock->shift);
}

void decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);

        if (now >= decrementer->next_tb)
                set_dec(1);
}

static int decrementer_set_next_event(unsigned long evt,
                                      struct clock_event_device *dev)
{
        __get_cpu_var(decrementers).next_tb = get_tb_or_rtc() + evt;
        set_dec(evt);
        return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
                                 struct clock_event_device *dev)
{
        if (mode != CLOCK_EVT_MODE_ONESHOT)
                decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
        struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;

        *dec = decrementer_clockevent;
        dec->cpumask = cpumask_of(cpu);

        printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
                    dec->name, dec->mult, dec->shift, cpu);

        clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
        int cpu = smp_processor_id();

        clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

        decrementer_clockevent.max_delta_ns =
                clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
        decrementer_clockevent.min_delta_ns =
                clockevent_delta2ns(2, &decrementer_clockevent);

        register_decrementer_clockevent(cpu);
}

void secondary_cpu_time_init(void)
{
        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* FIXME: Should make an unrelated change to move the
         * snapshot_timebase call here! */
        register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        struct div_result res;
        u64 scale;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        calc_cputime_factors();
        setup_cputime_one_jiffy();

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
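
        /*
         * Editorial trace of the loop above for tb_ticks_per_sec =
         * 512000000: div128_by_32() gives 1.953125 * 2^64, i.e.
         * result_high = 1 and scale = 0.953125 * 2^64.  One iteration
         * shifts the high bit in: scale = 0.9765625 * 2^64, shift = 1,
         * and result_high becomes 0, so sched_clock() computes
         * (tb_delta * 0.9765625) << 1 nanoseconds.
         */
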
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

        /* Start the decrementer on CPUs that have manual control
         * such as BookE
         */
        start_cpu_decrementer();

        /* Register the clocksource, if we're not running on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                clocksource_init();

        init_decrementer_clockevent();
}


#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time *tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear * 365 + leapsToDate + MonthOffset[tm->tm_mon - 1] +
               tm->tm_mday;

        tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time *tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}
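
/*
 * Editorial worked example: to_tm(0, &tm) decodes the Unix epoch.
 * day = 0 and hms = 0 give 00:00:00; the year loop exits at once
 * (0 < 365) leaving tm_year = 1970, the month loop leaves tm_mon = 1,
 * and tm_mday = 0 + 1 = 1.  GregorianDay() then computes
 * day = 1969*365 + (1969/4 - 1969/100 + 1969/400) + 0 + 1 = 719163,
 * and 719163 % 7 == 4, so tm_wday = 4: 1 January 1970 was a Thursday
 * (counting from 0 = Sunday).
 */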

/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low = ((u64)y << 32) + z;
}
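
/*
 * Editorial worked example: the routine above is schoolbook long
 * division in base 2^32, producing one 32-bit result "digit" (w, x, y,
 * z) per step and feeding each remainder into the next step.  Dividing
 * 10 * 2^64 (dividend_high = 10, dividend_low = 0) by 4 gives
 * w = 0, x = 2, y = 0x80000000, z = 0, i.e. result_high = 2 and
 * result_low = 2^63, which is indeed 2.5 * 2^64.
 */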

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
        /* Some generic code (such as spinlock debug) uses loops_per_jiffy
         * as the number of __delay(1) calls in a jiffy, so make it so.
         */
        loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
        struct platform_device *pdev;

        if (!ppc_md.get_rtc_time)
                return -ENODEV;

        pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);

        return 0;
}

module_init(rtc_init);