arch/mips/kernel/time.c
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004 Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines. See
 * Documentation/mips/time.README.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
#include <asm/sections.h>
#include <asm/time.h>

/*
 * The integer part of the number of usecs per jiffy is taken from tick,
 * but the fractional part is not recorded, so we calculate it using the
 * initial value of HZ.  This aids systems where tick isn't really an
 * integer (e.g. for HZ = 128).
 */
#define USECS_PER_JIFFY TICK_SIZE
#define USECS_PER_JIFFY_FRAC ((unsigned long)(u32)((1000000ULL << 32) / HZ))
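/*
 * Worked example (illustrative, not from the original source): for
 * HZ = 128 the exact tick is 1000000 / 128 = 7812.5 usec.  TICK_SIZE
 * (tick_nsec / 1000) keeps only the integer part, 7812, while
 * USECS_PER_JIFFY_FRAC records the lost 0.5 usec as a 32-bit binary
 * fraction: the low 32 bits of (1000000ULL << 32) / 128 are 0x80000000,
 * i.e. exactly one half.
 */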

#define TICK_SIZE (tick_nsec / 1000)

/*
 * forward reference
 */
DEFINE_SPINLOCK(rtc_lock);

/*
 * By default we provide the null RTC ops
 */
static unsigned long null_rtc_get_time(void)
{
        return mktime(2000, 1, 1, 0, 0, 0);
}

static int null_rtc_set_time(unsigned long sec)
{
        return 0;
}

unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_mips_set_mmss)(unsigned long);
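/*
 * Boards with a real RTC are expected to replace the null ops above,
 * normally from their board_time_init() hook (see the time_init()
 * description further down).  A minimal sketch, with purely illustrative
 * my_board_* / read_board_rtc_seconds() names:
 *
 *      static unsigned long my_board_rtc_get_time(void)
 *      {
 *              return read_board_rtc_seconds();
 *      }
 *
 *      void __init my_board_time_init(void)
 *      {
 *              rtc_mips_get_time = my_board_rtc_get_time;
 *              rtc_mips_set_time = my_board_rtc_set_time;
 *      }
 */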


/* how many counter cycles in a jiffy */
static unsigned long cycles_per_jiffy __read_mostly;

/* expirelo is the count value for the next CPU timer interrupt */
static unsigned int expirelo;


/*
 * Null timer ack for systems not needing one (e.g. i8254).
 */
static void null_timer_ack(void) { /* nothing */ }

/*
 * Null high precision timer functions for systems lacking one.
 */
static cycle_t null_hpt_read(void)
{
        return 0;
}

/*
 * Timer ack for an R4k-compatible timer of a known frequency.
 */
static void c0_timer_ack(void)
{
        unsigned int count;

        /* Ack this timer interrupt and set the next one. */
        expirelo += cycles_per_jiffy;
        write_c0_compare(expirelo);

        /* Check to see if we have missed any timer interrupts. */
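        /*
         * count and expirelo are unsigned, so (count - expirelo) stays
         * below 0x7fffffff exactly when Count has already reached or
         * passed the Compare value written above (modulo 2^32).  In that
         * case the next interrupt would be missed, so keep moving Compare
         * a full jiffy ahead of the current Count until it is safely in
         * the future again.
         */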
        while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
                /* missed_timer_count++; */
                expirelo = count + cycles_per_jiffy;
                write_c0_compare(expirelo);
        }
}

/*
 * High precision timer functions for an R4k-compatible timer.
 */
static cycle_t c0_hpt_read(void)
{
        return read_c0_count();
}

/* For use both as a high precision timer and an interrupt source. */
static void __init c0_hpt_timer_init(void)
{
        expirelo = read_c0_count() + cycles_per_jiffy;
        write_c0_compare(expirelo);
}

int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);

/* last time xtime and the RTC were synced up */
static long last_rtc_update;

/*
 * local_timer_interrupt() does profiling and process accounting
 * on a per-CPU basis.
 *
 * In UP mode, it is invoked from the (global) timer_interrupt.
 *
 * In SMP mode, it might be invoked by a per-CPU timer interrupt, or
 * by a broadcast inter-processor interrupt which itself is triggered
 * by the global timer interrupt.
 */
void local_timer_interrupt(int irq, void *dev_id)
{
        profile_tick(CPU_PROFILING);
        update_process_times(user_mode(get_irq_regs()));
}

/*
 * High-level timer interrupt service routine.  This function
 * is set as irqaction->handler and is invoked through do_IRQ.
 */
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
        write_seqlock(&xtime_lock);

        mips_timer_ack();

        /*
         * call the generic timer interrupt handling
         */
        do_timer(1);

        /*
         * If we have an externally synchronized Linux clock, then update
         * the CMOS clock accordingly every ~11 minutes.  rtc_mips_set_mmss()
         * has to be called as close as possible to 500 ms before the new
         * second starts.
         */
        if (ntp_synced() &&
            xtime.tv_sec > last_rtc_update + 660 &&
            (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
            (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
                if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
                        last_rtc_update = xtime.tv_sec;
                } else {
                        /* do it again in 60 s */
                        last_rtc_update = xtime.tv_sec - 600;
                }
        }

        write_sequnlock(&xtime_lock);

        /*
         * In UP mode, we call local_timer_interrupt() to do profiling
         * and process accounting.
         *
         * In SMP mode, local_timer_interrupt() is invoked by the
         * appropriate low-level local timer interrupt handler.
         */
        local_timer_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int null_perf_irq(void)
{
        return 0;
}

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(null_perf_irq);
EXPORT_SYMBOL(perf_irq);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
        /*
         * The performance counter overflow interrupt may be shared with the
         * timer interrupt (cp0_perfcount_irq < 0).  If it is and a
         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
         * and we can't reliably determine if a counter interrupt has also
         * happened (!r2) then don't check for a timer interrupt.
         */
        return (cp0_perfcount_irq < 0) &&
                perf_irq() == IRQ_HANDLED &&
                !r2;
}

asmlinkage void ll_timer_interrupt(int irq)
{
        int r2 = cpu_has_mips_r2;

        irq_enter();
        kstat_this_cpu.irqs[irq]++;

        if (handle_perf_irq(r2))
                goto out;

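        /*
         * On MIPS32/64 R2 the Cause register carries a TI (timer
         * interrupt) flag in bit 30, so a pending timer interrupt can be
         * checked explicitly; the (1 << 30) test below corresponds to
         * CAUSEF_TI.
         */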
        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
                goto out;

        timer_interrupt(irq, NULL);

out:
        irq_exit();
}

asmlinkage void ll_local_timer_interrupt(int irq)
{
        irq_enter();
        if (smp_processor_id() != 0)
                kstat_this_cpu.irqs[irq]++;

        /* we keep interrupts disabled all the time */
        local_timer_interrupt(irq, NULL);

        irq_exit();
}

/*
 * time_init() - it does the following things.
 *
 * 1) board_time_init() -
 *    a) (optional) set up RTC routines,
 *    b) (optional) calibrate and set the mips_hpt_frequency
 *       (only needed if you intend to use the CPU counter as the timer
 *       interrupt source)
 * 2) set up xtime based on rtc_mips_get_time().
 * 3) calculate a couple of cached variables for later use
 * 4) plat_timer_setup() -
 *    a) (optional) override any choices made above by time_init().
 *    b) machine-specific code should set up the timer irqaction.
 *    c) enable the timer interrupt
 */

void (*board_time_init)(void);

unsigned int mips_hpt_frequency;

static struct irqaction timer_irqaction = {
        .handler = timer_interrupt,
        .flags = IRQF_DISABLED | IRQF_PERCPU,
        .name = "timer",
};
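
/*
 * A typical plat_timer_setup() for a board driven by the CP0 timer just
 * registers the irqaction above on the CPU timer interrupt line.  A
 * minimal sketch (the interrupt number is board-specific; several
 * platforms use MIPS_CPU_IRQ_BASE + cp0_compare_irq):
 *
 *      void __init plat_timer_setup(struct irqaction *irq)
 *      {
 *              setup_irq(MIPS_CPU_IRQ_BASE + cp0_compare_irq, irq);
 *      }
 *
 * Boards with their own low-level timer handler may instead register
 * that handler here; see the comment above plat_timer_setup() in
 * time_init() below.
 */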

static unsigned int __init calibrate_hpt(void)
{
        cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;

        const int loops = HZ / 10;
        int log_2_loops = 0;
        int i;

        /*
         * We want to calibrate for 0.1s, but to avoid a 64-bit
         * division we round the number of loops up to the nearest
         * power of 2.
         */
        while (loops > 1 << log_2_loops)
                log_2_loops++;
        i = 1 << log_2_loops;

        /*
         * Wait for a rising edge of the timer interrupt.
         */
        while (mips_timer_state());
        while (!mips_timer_state());

        /*
         * Now see how many high precision timer ticks happen
         * during the calculated number of periods between timer
         * interrupts.
         */
        hpt_start = clocksource_mips.read();
        do {
                while (mips_timer_state());
                while (!mips_timer_state());
        } while (--i);
        hpt_end = clocksource_mips.read();

        hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
        hz = HZ;
        frequency = hpt_count * hz;

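        /*
         * frequency now holds counts-per-second scaled up by the number
         * of jiffies measured, so shifting by log_2_loops undoes the
         * rounding to a power of two.  Worked example (illustrative
         * numbers): with HZ = 100, loops = 10 is rounded up to 16
         * (log_2_loops = 4); if those 16 jiffies take 16,000,000 counter
         * ticks, the result is 16,000,000 * 100 >> 4 = 100,000,000,
         * i.e. a 100 MHz timer.
         */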
        return frequency >> log_2_loops;
}

struct clocksource clocksource_mips = {
        .name = "MIPS",
        .mask = CLOCKSOURCE_MASK(32),
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static void __init init_mips_clocksource(void)
{
        u64 temp;
        u32 shift;

        if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
                return;

        /* Calculate a somewhat reasonable rating value */
        clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
        /* Find a shift value */
        for (shift = 32; shift > 0; shift--) {
                temp = (u64) NSEC_PER_SEC << shift;
                do_div(temp, mips_hpt_frequency);
                if ((temp >> 32) == 0)
                        break;
        }
        clocksource_mips.shift = shift;
        clocksource_mips.mult = (u32)temp;
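        /*
         * The loop above picks the largest shift for which
         * mult = (NSEC_PER_SEC << shift) / mips_hpt_frequency still fits
         * in 32 bits; the generic clocksource code then converts cycles
         * to nanoseconds as (cycles * mult) >> shift.  Worked example
         * (illustrative): at 100 MHz the loop settles on shift = 28 and
         * mult = 10 << 28, so one cycle converts to
         * (1 * (10 << 28)) >> 28 = 10 ns, as expected for a 100 MHz clock.
         */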

        clocksource_register(&clocksource_mips);
}

void __init time_init(void)
{
        if (board_time_init)
                board_time_init();

        if (!rtc_mips_set_mmss)
                rtc_mips_set_mmss = rtc_mips_set_time;

        xtime.tv_sec = rtc_mips_get_time();
        xtime.tv_nsec = 0;

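        /*
         * wall_to_monotonic is the offset added to the wall clock (xtime)
         * to obtain the monotonic clock, so it is initialised to minus
         * the wall time just read from the RTC: the monotonic clock then
         * starts at zero.
         */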
        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        /* Choose appropriate high precision timer routines. */
        if (!cpu_has_counter && !clocksource_mips.read)
                /* No high precision timer -- sorry. */
                clocksource_mips.read = null_hpt_read;
        else if (!mips_hpt_frequency && !mips_timer_state) {
                /* A high precision timer of unknown frequency. */
                if (!clocksource_mips.read)
                        /* No external high precision timer -- use R4k. */
                        clocksource_mips.read = c0_hpt_read;
        } else {
                /* We know counter frequency.  Or we can get it. */
                if (!clocksource_mips.read) {
                        /* No external high precision timer -- use R4k. */
                        clocksource_mips.read = c0_hpt_read;

                        if (!mips_timer_state) {
                                /* No external timer interrupt -- use R4k. */
                                mips_timer_ack = c0_timer_ack;
                                /* Calculate the counter cycles per jiffy. */
                                cycles_per_jiffy =
                                        (mips_hpt_frequency + HZ / 2) / HZ;
                                /*
                                 * This sets up the high precision
                                 * timer for the first interrupt.
                                 */
                                c0_hpt_timer_init();
                        }
                }
                if (!mips_hpt_frequency)
                        mips_hpt_frequency = calibrate_hpt();

                /* Report the high precision timer rate for reference. */
                printk("Using %u.%03u MHz high precision timer.\n",
                       ((mips_hpt_frequency + 500) / 1000) / 1000,
                       ((mips_hpt_frequency + 500) / 1000) % 1000);
        }

        if (!mips_timer_ack)
                /* No timer interrupt ack (e.g. i8254). */
                mips_timer_ack = null_timer_ack;

        /*
         * Call board specific timer interrupt setup.
         *
         * This hook must be set up in the machine setup routine.
         *
         * Even if a machine chooses to use a low-level timer interrupt,
         * it still needs to set up the timer_irqaction.
         * In that case, it might be better to set timer_irqaction.handler
         * to a null function so that we are sure the high-level code
         * is not invoked accidentally.
         */
        plat_timer_setup(&timer_irqaction);

        init_mips_clocksource();
}

#define FEBRUARY 2
#define STARTOFTIME 1970
#define SECDAY 86400L
#define SECYR (SECDAY * 365)
#define leapyear(y) ((!((y) % 4) && ((y) % 100)) || !((y) % 400))
#define days_in_year(y) (leapyear(y) ? 366 : 365)
#define days_in_month(m) (month_days[(m) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

void to_tm(unsigned long tim, struct rtc_time *tm)
{
        long hms, day, gday;
        int i;

        gday = day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i - 1;     /* tm_mon ranges from 0 to 11 */

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of the week
         */
        tm->tm_wday = (gday + 4) % 7;   /* 1970/1/1 was a Thursday */
}
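
/*
 * Note on conventions: unlike the C library's struct tm, to_tm() stores
 * the full four-digit year in tm_year (e.g. 2001, not 101), while tm_mon
 * stays zero-based.  Worked example: to_tm(0, &tm) yields 1970/01/01
 * 00:00:00 with tm_wday = (0 + 4) % 7 = 4, i.e. Thursday.
 */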

EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL(to_tm);
EXPORT_SYMBOL(rtc_mips_set_time);
EXPORT_SYMBOL(rtc_mips_get_time);