/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make the clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (so never sleep
 * on SMP; nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time. (for iSeries, we calibrate the timebase
 *   against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 *   unambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif

/* keep track of when we need to update the rtc */
time_t last_rtc_update;
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
#endif

/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601   (1000000000 / HZ)

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif

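/*
 * Worked example: an "xsec" is 1/2^20 of a second, so
 * SCALE_XSEC(x, 1000000) computes x * 1e6 / 2^20, converting a
 * sub-second xsec count to microseconds without a divide; e.g.
 * x = 2^19 (half a second) gives 524288 * 1000000 / 1048576
 * = 500000 us.  __do_gettimeofday() below relies on this.
 */
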
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;

#define TICKLEN_SCALE   TICK_LENGTH_SHIFT
u64 last_tick_len;      /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;      /* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)

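/*
 * Example: with HZ = 250 (SHIFT_HZ = 8) a tick is about 4e6 ns, i.e.
 * roughly 2^22 ns.  Since 1e9 ~ 2^30 and HZ ~ 2^SHIFT_HZ, last_tick_len
 * is about 2^(30 - SHIFT_HZ + TICKLEN_SCALE), so shifting it left by
 * TICKLEN_SHIFT = 63 - 30 - TICKLEN_SCALE + SHIFT_HZ lands near 2^63,
 * which is what the ticklen_to_xs computation in time_init() expects.
 */
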
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);

static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}

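/*
 * Each factor above is (units_per_sec * 2^64) / tb_ticks_per_sec,
 * kept as a 0.64 binary fraction: div128_by_32(HZ, 0, ...) divides
 * the 128-bit value HZ << 64 by tb_ticks_per_sec and result_low is
 * the low 64 bits of the quotient.  A conversion then needs only a
 * high multiply, e.g. jiffies = mulhdu(ticks, __cputime_jiffies_factor).
 */
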
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, delta;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        delta = now - get_paca()->startpurr;
        get_paca()->startpurr = now;
        if (!in_interrupt()) {
                delta += get_paca()->system_time;
                get_paca()->system_time = 0;
        }
        account_system_time(tsk, 0, delta);
        local_irq_restore(flags);
}

/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
        cputime_t utime;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        account_user_time(tsk, utime);
}

static void account_process_time(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        account_process_vtime(current);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_mode(regs));
        scheduler_tick();
        run_posix_cpu_timers(current);
}

/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;            /* thread is running */
        u64     tb;                     /* last TB value read */
        u64     purr;                   /* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);

static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = mftb();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}

/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}

/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        if (!pme->initialized)
                return;         /* this can happen in early boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
}

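/*
 * Worked example for the accounting above: the PURR counts only
 * cycles dispatched to this (virtual) processor, while the timebase
 * advances in wall-clock time.  If delta(tb) = 1000 ticks between two
 * samples but delta(purr) = 600, the missing 400 ticks went to the
 * hypervisor or a sibling thread and are accounted as stolen.
 */
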
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)     update_process_times(user_mode(regs))
#define calculate_steal_time()          do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()                 do { } while (0)
#endif

/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb();
        snapshot_purr();
}

void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

static __inline__ void timer_check_rtc(void)
{
        /*
         * Update the rtc when needed; this should be performed on the
         * right fraction of a second. Half or full second?
         * A full second works on mk48t59 clocks; others need testing.
         * Note that this update is basically only used through
         * the adjtimex system calls. Setting the HW clock in
         * any other way is a /dev/rtc and userland business.
         * This is still wrong by -0.5/+1.5 jiffies because of the
         * timer interrupt resolution and possible delay, but here we
         * hit a quantization limit which can only be solved by higher
         * resolution timers and decoupling time management from timer
         * interrupts. This is also wrong on the clocks
         * which require being written at the half second boundary.
         * We should have an rtc call that only sets the minutes and
         * seconds like on Intel to avoid problems with non-UTC clocks.
         */
        if (ppc_md.set_rtc_time && ntp_synced() &&
            xtime.tv_sec - last_rtc_update >= 659 &&
            abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
                struct rtc_time tm;
                to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
                tm.tm_year -= 1900;
                tm.tm_mon -= 1;
                if (ppc_md.set_rtc_time(&tm) == 0)
                        last_rtc_update = xtime.tv_sec + 1;
                else
                        /* Try again one minute later */
                        last_rtc_update += 60;
        }
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
        unsigned long sec, usec;
        u64 tb_ticks, xsec;
        struct gettimeofday_vars *temp_varp;
        u64 temp_tb_to_xs, temp_stamp_xsec;

        /*
         * These calculations are faster (gets rid of divides)
         * if done in units of 1/2^20 rather than microseconds.
         * The conversion to microseconds at the end is done
         * without a divide (and in fact, without a multiply)
         */
        temp_varp = do_gtod.varp;

        /* Sampling the time base must be done after loading
         * do_gtod.varp in order to avoid racing with update_gtod.
         */
        data_barrier(temp_varp);
        tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
        temp_tb_to_xs = temp_varp->tb_to_xs;
        temp_stamp_xsec = temp_varp->stamp_xsec;
        xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
        sec = xsec / XSEC_PER_SEC;
        usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
        usec = SCALE_XSEC(usec, 1000000);

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

void do_gettimeofday(struct timeval *tv)
{
        if (__USE_RTC()) {
                /* do this the old way */
                unsigned long flags, seq;
                unsigned int sec, nsec, usec;

                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        sec = xtime.tv_sec;
                        nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                usec = nsec / 1000;
                while (usec >= 1000000) {
                        usec -= 1000000;
                        ++sec;
                }
                tv->tv_sec = sec;
                tv->tv_usec = usec;
                return;
        }
        __do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}

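/*
 * Sketch of the userspace side of this protocol (illustrative
 * pseudo-C; the real reader lives in the vdso, largely in assembly):
 *
 *      do {
 *              seq = vdso_data->tb_update_count;   // must be even
 *              barrier();
 *              ... read tb_orig_stamp, stamp_xsec, tb_to_xs ...
 *              barrier();
 *      } while ((seq & 1) || seq != vdso_data->tb_update_count);
 */
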
/*
 * When the timebase - tb_orig_stamp difference gets too big, we
 * rebase tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32-bit number.  This is a requirement of our fast 32-bit userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with too big a difference, then the vdso will fall back to calling
 * the syscall.
 */
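/*
 * Illustrative numbers: with a 512 MHz timebase, 2^31 ticks is about
 * 4.2 seconds, so the once-per-jiffy call below normally keeps the
 * tb - tb_orig_stamp delta far below the 32-bit limit.
 */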
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
        unsigned long offset;
        u64 new_stamp_xsec;
        u64 tlen, t2x;
        u64 tb, xsec_old, xsec_new;
        struct gettimeofday_vars *varp;

        if (__USE_RTC())
                return;
        tlen = current_tick_length();
        offset = cur_tb - do_gtod.varp->tb_orig_stamp;
        if (tlen == last_tick_len && offset < 0x80000000u)
                return;
        if (tlen != last_tick_len) {
                t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
                last_tick_len = tlen;
        } else
                t2x = do_gtod.varp->tb_to_xs;
        new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Make sure time doesn't go backwards for userspace gettimeofday.
         */
        tb = get_tb();
        varp = do_gtod.varp;
        xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
                + varp->stamp_xsec;
        xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
        if (xsec_new < xsec_old)
                new_stamp_xsec += xsec_old - xsec_new;

        update_gtod(cur_tb, new_stamp_xsec, t2x);
}

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */

static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if (iSeries_recal_titan) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';
                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if (tick_diff < 0) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if (tick_diff) {
                        if (tick_diff < tb_ticks_per_jiffy/25) {
                                printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                       new_tb_ticks_per_jiffy, sign, tick_diff);
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres);
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        }
                        else {
                                printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
                                       "                   new tb_ticks_per_jiffy = %lu\n"
                                       "                   old tb_ticks_per_jiffy = %lu\n",
                                       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */

/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (Almost always the new decrementer value will
 * be greater than the current hardware decrementer, so the
 * hypervisor call will not be needed.)
 */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
        struct pt_regs *old_regs;
        int next_dec;
        int cpu = smp_processor_id();
        unsigned long ticks;
        u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        profile_tick(CPU_PROFILING);
        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
               >= tb_ticks_per_jiffy) {
                /* Update last_jiffy */
                per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
                /* Handle RTCL overflow on 601 */
                if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
                        per_cpu(last_jiffy, cpu) -= 1000000000;

                /*
                 * We cannot disable the decrementer, so in the period
                 * between this cpu's being marked offline in cpu_online_map
                 * and calling stop-self, it is taking timer interrupts.
                 * Avoid calling into the scheduler rebalancing code if this
                 * is the case.
                 */
                if (!cpu_is_offline(cpu))
                        account_process_time(regs);

                /*
                 * No need to check whether cpu is offline here; boot_cpuid
                 * should have been fixed up by now.
                 */
                if (cpu != boot_cpuid)
                        continue;

                write_seqlock(&xtime_lock);
                tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
                if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
                        tb_last_jiffy = tb_next_jiffy;
                        do_timer(1);
                        timer_recalc_offset(tb_last_jiffy);
                        timer_check_rtc();
                }
                write_sequnlock(&xtime_lock);
        }

        next_dec = tb_ticks_per_jiffy - ticks;
        set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}

void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}

#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
        u64 new_xsec;
        unsigned long tb_delta;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        /*
         * Updating the RTC is not the job of this code. If the time is
         * stepped under NTP, the RTC will be updated after STA_UNSYNC
         * is cleared.  Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Subtract off the number of nanoseconds since the
         * beginning of the last tick.
         */
        tb_delta = tb_ticks_since(tb_last_jiffy);
        tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
        new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

        set_normalized_timespec(&xtime, new_sec, new_nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        /* In case of a large backwards jump in time with NTP, we want the
         * clock to be updated as soon as the PLL is again in lock.
         */
        last_rtc_update = new_sec - 658;

        ntp_clear();

        new_xsec = xtime.tv_nsec;
        if (new_xsec != 0) {
                new_xsec *= XSEC_PER_SEC;
                do_div(new_xsec, NSEC_PER_SEC);
        }
        new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;

        write_sequnlock_irqrestore(&xtime_lock, flags);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);

static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}

void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#ifdef CONFIG_BOOKE
        /* Set the time base to zero */
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, 0);

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}

unsigned long get_boot_time(void)
{
        struct rtc_time tm;

        if (ppc_md.get_boot_time)
                return ppc_md.get_boot_time();
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        unsigned long tm = 0;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (ppc_md.time_init != NULL)
                timezone_offset = ppc_md.time_init();

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
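        /*
         * Worked example: for a 1 GHz timebase, 1e9 * 2^64 / 1e9 = 2^64,
         * so res.result_high = 1 and one pass through the loop above
         * yields scale = 2^63, shift = 1.  sched_clock() then returns
         * mulhdu(tb, 2^63) << 1 = tb, i.e. exactly one nanosecond per
         * tick, as expected.
         */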
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb();

        tm = get_boot_time();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
                tm -= timezone_offset;
        }

        xtime.tv_sec = tm;
        xtime.tv_nsec = 0;
        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
        __get_cpu_var(last_jiffy) = tb_last_jiffy;
        do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        time_freq = 0;

        last_rtc_update = xtime.tv_sec;
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);
        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Not exact, but the timer interrupt takes care of this */
        set_dec(tb_ticks_per_jiffy);
}


#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904, 1996 and 2000 were leap years; 1900 was not
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}

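/*
 * Worked example for the two functions above: to_tm(0, &tm) yields
 * 1970-01-01 00:00:00.  GregorianDay() then sees lastYear = 1969,
 * leapsToDate = 492 - 19 + 4 = 477 and
 * day = 1969 * 365 + 477 + 0 + 1 = 719163; 719163 % 7 = 4, so
 * tm_wday = 4, a Thursday, which 1 January 1970 indeed was.
 */
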
/* Auxiliary function to compute scaling factors */
/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency, giving a resolution of a few tens of nanoseconds, is
 * quite nice.  It makes this computation very precise (27-28 bits
 * typically), which is optimistic considering the stability of most
 * processor clock oscillators and the precision with which the
 * timebase frequency is measured, but does no harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt = 0, tmp, err;
        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */

        for (tmp = 1U << 31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt | tmp) < outscale)
                        mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */

        err = inscale * (mlt + 1);
        if (err <= inscale / 2)
                mlt++;
        return mlt;
}

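/*
 * Usage example: time_init() sets
 *      tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 * giving a multiplier close to 2^32 * 1e6 / ppc_tb_freq, so that
 * mulhwu(ticks, tb_to_us) converts timebase ticks to microseconds
 * with a single 32x32->64 bit multiply.
 */
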
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}
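
/*
 * Worked example: div128_by_32(1, 0, 1000000000, &dr) computes
 * (1 << 64) / 1e9, leaving dr->result_low = 18446744073, the 0.64
 * fixed-point representation of 1/1e9.  time_init() uses the same
 * routine with dividend_high = 1e9 to derive the 64.64 sched_clock
 * scale factor.
 */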