/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 *   measurement at boot time.
 * - for astronomical applications: add a new function to get
 *   non-ambiguous timestamps even around leap seconds. This needs
 *   a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
#include <asm/trace.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
#include <asm/cputime.h>

/* powerpc clocksource/clockevent code */

#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>

static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
	.name         = "rtc",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = rtc_read,
};

static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
	.name         = "timebase",
	.rating       = 400,
	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
	.mask         = CLOCKSOURCE_MASK(64),
	.read         = timebase_read,
};

#define DECREMENTER_MAX	0x7fffffff

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev);
static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev);

struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
	.set_mode       = decrementer_set_mode,
	.features       = CLOCK_EVT_FEAT_ONESHOT,
};
EXPORT_SYMBOL(decrementer_clockevent);

DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);

#define XSEC_PER_SEC (1024*1024)

#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)	(((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)	mulhwu((xsec) << 12, max)
#endif

unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static u64 boot_tb __read_mostly;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
DEFINE_PER_CPU(unsigned long, cputime_last_delta);
DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);

cputime_t cputime_one_jiffy;

void (*dtl_consumer)(struct dtl_entry *, u64);

static void calc_cputime_factors(void)
{
	struct div_result res;

	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
	__cputime_jiffies_factor = res.result_low;
	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
	__cputime_usec_factor = res.result_low;
	div128_by_32(1, 0, tb_ticks_per_sec, &res);
	__cputime_sec_factor = res.result_low;
	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
	__cputime_clockt_factor = res.result_low;
}

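/*
 * Worked example (illustrative numbers only): with
 * tb_ticks_per_sec = 512000000, __cputime_usec_factor becomes the
 * 0.64 fraction 1000000 / 512000000, i.e. 2^64 / 512 =
 * 0x0080000000000000.  Converting a tick count to microseconds is
 * then a single high-half multiply, along the lines of
 * usecs = mulhdu(ticks, __cputime_usec_factor).
 */
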
/*
 * Read the SPURR on systems that have it, otherwise the PURR,
 * or if that doesn't exist return the timebase value passed in.
 */
static u64 read_spurr(u64 tb)
{
	if (cpu_has_feature(CPU_FTR_SPURR))
		return mfspr(SPRN_SPURR);
	if (cpu_has_feature(CPU_FTR_PURR))
		return mfspr(SPRN_PURR);
	return tb;
}

#ifdef CONFIG_PPC_SPLPAR

/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
		if (dtl_consumer)
			dtl_consumer(dtl, i);
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}

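/*
 * The hypervisor keeps writing entries at vpa->dtl_idx modulo
 * N_DISPATCH_LOG, so if the read index falls more than N_DISPATCH_LOG
 * behind, the unread entries have already been overwritten; the
 * overflow branch above jumps forward to the oldest entry still
 * present and continues from there.
 */
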
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void accumulate_stolen_time(void)
{
	u64 sst, ust;
	u8 save_soft_enabled = local_paca->soft_enabled;

	/* We are called early in the exception entry, before
	 * soft/hard_enabled are sync'ed to the expected state
	 * for the exception. We are hard disabled but the PACA
	 * needs to reflect that so various debug stuff doesn't
	 * complain.
	 */
	local_paca->soft_enabled = 0;

	sst = scan_dispatch_log(local_paca->starttime_user);
	ust = scan_dispatch_log(local_paca->starttime);
	local_paca->system_time -= sst;
	local_paca->user_time -= ust;
	local_paca->stolen_time += ust + sst;

	local_paca->soft_enabled = save_soft_enabled;
}

static inline u64 calculate_stolen_time(u64 stop_tb)
{
	u64 stolen = 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
		stolen = scan_dispatch_log(stop_tb);
		get_paca()->system_time -= stolen;
	}

	stolen += get_paca()->stolen_time;
	get_paca()->stolen_time = 0;
	return stolen;
}

#else /* CONFIG_PPC_SPLPAR */
static inline u64 calculate_stolen_time(u64 stop_tb)
{
	return 0;
}

#endif /* CONFIG_PPC_SPLPAR */

/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
static u64 vtime_delta(struct task_struct *tsk,
			u64 *sys_scaled, u64 *stolen)
{
	u64 now, nowscaled, deltascaled;
	u64 udelta, delta, user_scaled;

	WARN_ON_ONCE(!irqs_disabled());

	now = mftb();
	nowscaled = read_spurr(now);
	get_paca()->system_time += now - get_paca()->starttime;
	get_paca()->starttime = now;
	deltascaled = nowscaled - get_paca()->startspurr;
	get_paca()->startspurr = nowscaled;

	*stolen = calculate_stolen_time(now);

	delta = get_paca()->system_time;
	get_paca()->system_time = 0;
	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
	get_paca()->utime_sspurr = get_paca()->user_time;

	/*
	 * Because we don't read the SPURR on every kernel entry/exit,
	 * deltascaled includes both user and system SPURR ticks.
	 * Apportion these ticks to system SPURR ticks and user
	 * SPURR ticks in the same ratio as the system time (delta)
	 * and user time (udelta) values obtained from the timebase
	 * over the same interval. The system ticks get accounted here;
	 * the user ticks get saved up in paca->user_time_scaled to be
	 * used by account_process_tick.
	 */
	*sys_scaled = delta;
	user_scaled = udelta;
	if (deltascaled != delta + udelta) {
		if (udelta) {
			*sys_scaled = deltascaled * delta / (delta + udelta);
			user_scaled = deltascaled - *sys_scaled;
		} else {
			*sys_scaled = deltascaled;
		}
	}
	get_paca()->user_time_scaled += user_scaled;

	return delta;
}

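/*
 * Worked example of the apportioning (numbers are illustrative): if
 * the timebase saw delta = 300 system ticks and udelta = 100 user
 * ticks but the SPURR only advanced deltascaled = 200 ticks (the
 * thread ran at half speed), then sys_scaled = 200 * 300 / 400 = 150
 * and user_scaled = 200 - 150 = 50, preserving the 3:1 system:user
 * ratio.
 */
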
void vtime_account_system(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_system_time(tsk, 0, delta, sys_scaled);
	if (stolen)
		account_steal_time(stolen);
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	u64 delta, sys_scaled, stolen;

	delta = vtime_delta(tsk, &sys_scaled, &stolen);
	account_idle_time(delta + stolen);
}

/*
 * Transfer the user time accumulated in the paca
 * by the exception entry and exit code to the generic
 * process user time records.
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t utime, utimescaled;

	utime = get_paca()->user_time;
	utimescaled = get_paca()->user_time_scaled;
	get_paca()->user_time = 0;
	get_paca()->user_time_scaled = 0;
	get_paca()->utime_sspurr = 0;
	account_user_time(tsk, utime, utimescaled);
}

#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif

void __delay(unsigned long loops)
{
	unsigned long start;
	int diff;

	if (__USE_RTC()) {
		start = get_rtcl();
		do {
			/* the RTCL register wraps at 1000000000 */
			diff = get_rtcl() - start;
			if (diff < 0)
				diff += 1000000000;
		} while (diff < loops);
	} else {
		start = get_tbl();
		while (get_tbl() - start < loops)
			HMT_low();
		HMT_medium();
	}
}
EXPORT_SYMBOL(__delay);

void udelay(unsigned long usecs)
{
	__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);

#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
		return regs->link;

	return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif

#ifdef CONFIG_IRQ_WORK

/*
 * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
 */
#ifdef CONFIG_PPC64
static inline unsigned long test_irq_work_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, irq_work_pending)));
	return x;
}

static inline void set_irq_work_pending_flag(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (1),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

static inline void clear_irq_work_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
		"r" (0),
		"i" (offsetof(struct paca_struct, irq_work_pending)));
}

#else /* 32-bit */

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

#endif /* 32 vs 64 bit */

void arch_irq_work_raise(void)
{
	preempt_disable();
	set_irq_work_pending_flag();
	set_dec(1);
	preempt_enable();
}

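/*
 * On 64-bit, r13 always holds the address of this CPU's PACA, so the
 * lbz/stb sequences above reach the irq_work_pending byte in a single
 * instruction with no preemption concerns.  Raising irq_work simply
 * sets that flag and programs the decrementer to 1 tick, so a
 * decrementer exception arrives almost immediately and
 * timer_interrupt() below runs irq_work_run() on this CPU.
 */
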
#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()	0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */

/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
	struct clock_event_device *evt = &__get_cpu_var(decrementers);
	u64 now;

	/* Ensure a positive value is written to the decrementer, or else
	 * some CPUs will continue to take decrementer exceptions.
	 */
	set_dec(DECREMENTER_MAX);

	/* Some implementations of hotplug will get timer interrupts while
	 * offline, just ignore these.  We also need to set
	 * decrementers_next_tb to MAX so that __check_irq_replay
	 * doesn't replay the timer interrupt on return, otherwise we'd
	 * trap here infinitely :(
	 */
	if (!cpu_online(smp_processor_id())) {
		*next_tb = ~(u64)0;
		return;
	}

	/* Conditionally hard-enable interrupts now that the DEC has been
	 * bumped to its maximum value
	 */
	may_hard_irq_enable();

#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
		do_IRQ(regs);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();

	trace_timer_interrupt_entry(regs);

	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

	now = get_tb_or_rtc();
	if (now >= *next_tb) {
		*next_tb = ~(u64)0;
		if (evt->event_handler)
			evt->event_handler(evt);
		__get_cpu_var(irq_stat).timer_irqs_event++;
	} else {
		now = *next_tb - now;
		if (now <= DECREMENTER_MAX)
			set_dec((int)now);
		__get_cpu_var(irq_stat).timer_irqs_others++;
	}

#ifdef CONFIG_PPC64
	/* collect purr register values often, for accurate calculations */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		cu->current_tb = mfspr(SPRN_PURR);
	}
#endif

	trace_timer_interrupt_exit(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
	/* Disable the decrementer, so that it doesn't interfere
	 * with suspending.
	 */

	set_dec(DECREMENTER_MAX);
	local_irq_disable();
	set_dec(DECREMENTER_MAX);
}

static void generic_suspend_enable_irqs(void)
{
	local_irq_enable();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_disable_irqs(void)
{
	if (ppc_md.suspend_disable_irqs)
		ppc_md.suspend_disable_irqs();
	generic_suspend_disable_irqs();
}

/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
	generic_suspend_enable_irqs();
	if (ppc_md.suspend_enable_irqs)
		ppc_md.suspend_enable_irqs();
}
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
	if (__USE_RTC())
		return get_rtc();
	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}

static int __init get_freq(char *name, int cells, unsigned long *val)
{
	struct device_node *cpu;
	const __be32 *fp;
	int found = 0;

	/* The cpu node should have timebase and clock frequency properties */
	cpu = of_find_node_by_type(NULL, "cpu");

	if (cpu) {
		fp = of_get_property(cpu, name, NULL);
		if (fp) {
			found = 1;
			*val = of_read_ulong(fp, cells);
		}

		of_node_put(cpu);
	}

	return found;
}

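/*
 * Example device tree fragment this parses (values illustrative):
 *
 *	cpu@0 {
 *		timebase-frequency = <512000000>;
 *		clock-frequency = <1400000000>;
 *	};
 *
 * The "ibm,extended-*" variants carry 2-cell (64-bit) values, for
 * frequencies that may not fit in 32 bits.
 */
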
void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
}

void __init generic_calibrate_decr(void)
{
	ppc_tb_freq = DEFAULT_TB_FREQ;		/* hardcoded default */

	if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
	    !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

		printk(KERN_ERR "WARNING: Estimating decrementer frequency "
				"(not found)\n");
	}

	ppc_proc_freq = DEFAULT_PROC_FREQ;	/* hardcoded default */

	if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
	    !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

		printk(KERN_ERR "WARNING: Estimating processor frequency "
				"(not found)\n");
	}
}

int update_persistent_clock(struct timespec now)
{
	struct rtc_time tm;

	if (!ppc_md.set_rtc_time)
		return -ENODEV;

	to_tm(now.tv_sec + 1 + timezone_offset, &tm);
	tm.tm_year -= 1900;
	tm.tm_mon -= 1;

	return ppc_md.set_rtc_time(&tm);
}

static void __read_persistent_clock(struct timespec *ts)
{
	struct rtc_time tm;
	static int first = 1;

	ts->tv_nsec = 0;
	/* XXX this is a little fragile but will work okay in the short term */
	if (first) {
		first = 0;
		if (ppc_md.time_init)
			timezone_offset = ppc_md.time_init();

		/* get_boot_time() isn't guaranteed to be safe to call late */
		if (ppc_md.get_boot_time) {
			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
			return;
		}
	}
	if (!ppc_md.get_rtc_time) {
		ts->tv_sec = 0;
		return;
	}
	ppc_md.get_rtc_time(&tm);

	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
			    tm.tm_hour, tm.tm_min, tm.tm_sec);
}

void read_persistent_clock(struct timespec *ts)
{
	__read_persistent_clock(ts);

	/* Sanitize it in case real time clock is set below EPOCH */
	if (ts->tv_sec < 0) {
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}

/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
	return (cycle_t)get_rtc();
}

static cycle_t timebase_read(struct clocksource *cs)
{
	return (cycle_t)get_tb();
}

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
			 struct clocksource *clock, u32 mult)
{
	u64 new_tb_to_xs, new_stamp_xsec;
	u32 frac_sec;

	if (clock != &clocksource_timebase)
		return;

	/* Make userspace gettimeofday spin until we're done. */
	++vdso_data->tb_update_count;
	smp_mb();

	/* 19342813113834067 ~= 2^(20+64) / 1e9 */
	new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
	do_div(new_stamp_xsec, 1000000000);
	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;

	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
	/* this is tv_nsec / 1e9 as a 0.32 fraction */
	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;

	/*
	 * tb_update_count is used to allow the userspace gettimeofday code
	 * to assure itself that it sees a consistent view of the tb_to_xs and
	 * stamp_xsec variables.  It reads the tb_update_count, then reads
	 * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
	 * the two values of tb_update_count match and are even then the
	 * tb_to_xs and stamp_xsec values are consistent.  If not, then it
	 * loops back and reads them again until this criterion is met.
	 * We expect the caller to have done the first increment of
	 * vdso_data->tb_update_count already.
	 */
	vdso_data->tb_orig_stamp = clock->cycle_last;
	vdso_data->stamp_xsec = new_stamp_xsec;
	vdso_data->tb_to_xs = new_tb_to_xs;
	vdso_data->wtom_clock_sec = wtm->tv_sec;
	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
	vdso_data->stamp_xtime = *wall_time;
	vdso_data->stamp_sec_fraction = frac_sec;
	smp_wmb();
	++(vdso_data->tb_update_count);
}

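/*
 * The reader side described above is the usual seqcount pattern; a
 * sketch of what the VDSO does (illustrative, not the actual VDSO
 * source):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;
 *		smp_rmb();
 *		... read tb_orig_stamp, tb_to_xs, stamp_xsec ...
 *		smp_rmb();
 *	} while ((seq & 1) || seq != vdso_data->tb_update_count);
 */
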
void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}

static void __init clocksource_init(void)
{
	struct clocksource *clock;

	if (__USE_RTC())
		clock = &clocksource_rtc;
	else
		clock = &clocksource_timebase;

	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
		printk(KERN_ERR "clocksource: %s is already registered\n",
		       clock->name);
		return;
	}

	printk(KERN_INFO "clocksource: %s mult[%x] shift[%d] registered\n",
	       clock->name, clock->mult, clock->shift);
}

static int decrementer_set_next_event(unsigned long evt,
				      struct clock_event_device *dev)
{
	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
	set_dec(evt);
	return 0;
}

static void decrementer_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *dev)
{
	if (mode != CLOCK_EVT_MODE_ONESHOT)
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu);

	*dec = decrementer_clockevent;
	dec->cpumask = cpumask_of(cpu);

	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
		    dec->name, dec->mult, dec->shift, cpu);

	clockevents_register_device(dec);
}

static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);

	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
		clockevent_delta2ns(2, &decrementer_clockevent);

	register_decrementer_clockevent(cpu);
}

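/*
 * Rough numbers (illustrative): with ppc_tb_freq = 512000000, the
 * 31-bit DECREMENTER_MAX of 0x7fffffff ticks corresponds to
 * 2147483647 / 512000000 ~= 4.19 seconds, the longest one-shot
 * interval this clockevent can be programmed for.
 */
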
void secondary_cpu_time_init(void)
{
	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* FIXME: Should make an unrelated change to move the
	 * snapshot_timebase call here! */
	register_decrementer_clockevent(smp_processor_id());
}

/* This function is only called on the boot processor */
void __init time_init(void)
{
	struct div_result res;
	u64 scale;
	unsigned shift;

	if (__USE_RTC()) {
		/* 601 processor: dec counts down by 128 every 128ns */
		ppc_tb_freq = 1000000000;
	} else {
		/* Normal PowerPC with timebase register */
		ppc_md.calibrate_decr();
		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
	}

	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
	tb_ticks_per_sec = ppc_tb_freq;
	tb_ticks_per_usec = ppc_tb_freq / 1000000;
	calc_cputime_factors();
	setup_cputime_one_jiffy();

	/*
	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
	 * which is the timebase frequency.
	 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
	 * the 128-bit result as a 64.64 fixed-point number.
	 * We then shift that number right until it is less than 1.0,
	 * giving us the scale factor and shift count to use in
	 * sched_clock().
	 */
	div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
	scale = res.result_low;
	for (shift = 0; res.result_high != 0; ++shift) {
		scale = (scale >> 1) | (res.result_high << 63);
		res.result_high >>= 1;
	}
	tb_to_ns_scale = scale;
	tb_to_ns_shift = shift;
	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
	boot_tb = get_tb_or_rtc();

	/* If platform provided a timezone (pmac), we correct the time */
	if (timezone_offset) {
		sys_tz.tz_minuteswest = -timezone_offset / 60;
		sys_tz.tz_dsttime = 0;
	}

	vdso_data->tb_update_count = 0;
	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;

	/* Start the decrementer on CPUs that have manual control
	 * such as BookE
	 */
	start_cpu_decrementer();

	/* Register the clocksource */
	clocksource_init();

	init_decrementer_clockevent();
}
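
/*
 * Worked example of the scale computation above (numbers illustrative):
 * for tb_ticks_per_sec = 512000000, the 128-bit quotient
 * 1e9 * 2^64 / 512000000 is 1.953125 as a 64.64 fixed-point value.
 * One right shift brings it below 1.0, leaving scale = 0.9765625
 * (0xfa00000000000000 as a 0.64 fraction) and shift = 1, so
 * sched_clock() yields exactly 1.953125 ns per timebase tick.
 */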

#define FEBRUARY	2
#define STARTOFTIME	1970
#define SECDAY		86400L
#define SECYR		(SECDAY * 365)
#define leapyear(year)		((year) % 4 == 0 && \
				 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)		(leapyear(a) ? 366 : 365)
#define days_in_month(a)	(month_days[(a) - 1])

static int month_days[12] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
	int leapsToDate;
	int lastYear;
	int day;
	int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

	lastYear = tm->tm_year - 1;

	/*
	 * Number of leap corrections to apply up to end of last year
	 */
	leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

	/*
	 * This year is a leap year if it is divisible by 4 except when it is
	 * divisible by 100 unless it is divisible by 400
	 *
	 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
	 */
	day = tm->tm_mon > 2 && leapyear(tm->tm_year);

	day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
	       tm->tm_mday;

	tm->tm_wday = day % 7;
}

void to_tm(int tim, struct rtc_time * tm)
{
	register int i;
	register long hms, day;

	day = tim / SECDAY;
	hms = tim % SECDAY;

	/* Hours, minutes, seconds are easy */
	tm->tm_hour = hms / 3600;
	tm->tm_min = (hms % 3600) / 60;
	tm->tm_sec = (hms % 3600) % 60;

	/* Number of years in days */
	for (i = STARTOFTIME; day >= days_in_year(i); i++)
		day -= days_in_year(i);
	tm->tm_year = i;

	/* Number of months in days left */
	if (leapyear(tm->tm_year))
		days_in_month(FEBRUARY) = 29;
	for (i = 1; day >= days_in_month(i); i++)
		day -= days_in_month(i);
	days_in_month(FEBRUARY) = 28;
	tm->tm_mon = i;

	/* Days are what is left over (+1) from all that. */
	tm->tm_mday = day + 1;

	/*
	 * Determine the day of week
	 */
	GregorianDay(tm);
}

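/*
 * Example (illustrative): to_tm(86400, &tm) yields tm_year = 1970,
 * tm_mon = 1, tm_mday = 2, tm_hour/min/sec = 0, and GregorianDay()
 * sets tm_wday = 5 (Friday, with 0 = Sunday), since day 0 of the
 * epoch, 1970-01-01, was a Thursday.
 */
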
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
		  unsigned divisor, struct div_result *dr)
{
	unsigned long a, b, c, d;
	unsigned long w, x, y, z;
	u64 ra, rb, rc;

	a = dividend_high >> 32;
	b = dividend_high & 0xffffffff;
	c = dividend_low >> 32;
	d = dividend_low & 0xffffffff;

	w = a / divisor;
	ra = ((u64)(a - (w * divisor)) << 32) + b;

	rb = ((u64) do_div(ra, divisor) << 32) + c;
	x = ra;

	rc = ((u64) do_div(rb, divisor) << 32) + d;
	y = rb;

	do_div(rc, divisor);
	z = rc;

	dr->result_high = ((u64)w << 32) + x;
	dr->result_low  = ((u64)y << 32) + z;
}

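/*
 * Example (illustrative): div128_by_32(1, 0, 3, &dr) divides 2^64 by 3,
 * giving dr.result_high = 0 and dr.result_low = 0x5555555555555555
 * (any remainder is discarded).  calc_cputime_factors() and time_init()
 * use this to form 0.64 and 64.64 fixed-point ratios such as
 * 1e9 * 2^64 / tb_ticks_per_sec.
 */
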
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!ppc_md.get_rtc_time)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

module_init(rtc_init);