/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(void);

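/*
 * Data consumed by the light-weight (fsyscall) gettimeofday path; it is
 * filled in by update_vsyscall() below.
 */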
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(void)
{
	if (pv_time_ops.clocksource_resume)
		pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0, /* to be calculated */
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
	.resume		= paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);

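/*
 * ac_stime and ac_utime in thread_info hold raw ITC cycles accumulated
 * since the ac_stamp snapshot; the functions below convert those cycle
 * counts to cputime and charge them to the task.
 */
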
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	account_system_time(prev, 0, delta_stime);
	account_system_time_scaled(prev, delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime);
		account_user_time_scaled(prev, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	account_system_time(tsk, 0, delta_stime);
	account_system_time_scaled(tsk, delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime);
		account_user_time_scaled(p, delta_utime);
		ti->ac_utime = 0;
	}
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	if (paravirt_do_steal_accounting(&new_itm))
		goto skip_process_time_accounting;

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id) {
			/*
			 * Here we are in the timer irq handler.  We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU.  We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(1);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

skip_process_time_accounting:

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
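	/*
	 * The staggering formula below bisects the tick interval generation
	 * by generation (hi is the largest power of two <= cpu): cpu 1 fires
	 * delta/2 after cpu 0; cpus 2 and 3 fire at delta/4 and 3*delta/4;
	 * cpus 4-7 at delta/8, 3*delta/8, 5*delta/8 and 7*delta/8; and so on.
	 */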
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);


void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
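	/*
	 * nsec_per_cyc is a rounded fixed-point scale factor,
	 * (NSEC_PER_SEC << IA64_NSEC_PER_CYC_SHIFT) / itc_freq, so that
	 * cycles can be converted to nanoseconds as
	 * (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT.
	 */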
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing.  If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option.  However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers.  Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * synchronizing the ITCs since there may be large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	paravirt_init_missing_ticks_accounting(smp_processor_id());

	/* avoid a softlockup message when a cpu is unplugged and plugged again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
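		/*
		 * clocksource_hz2mult() picks mult such that
		 * ns = (cycles * mult) >> shift for a counter running at
		 * itc_freq, i.e. mult ~= (NSEC_PER_SEC << shift) / itc_freq.
		 */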
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(void)
{
	u64 lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
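	/*
	 * If this CPU's ITC reads behind the last value handed out on any
	 * CPU, return that last value so time never appears to go backward.
	 */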
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, you could lose out in contention of
	 * cmpxchg.  If so, your cmpxchg returns the new value, which the
	 * winner of the contention updated to.  Use the new value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}


static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	efi_gettimeofday(&xtime);
	ia64_init_itm();

	/*
	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero;
	 * the tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
	 */
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

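/*
 * udelay() is indirected through a function pointer so that platforms
 * whose ITC is unsuitable for delay loops can install a replacement.
 */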
void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

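/*
 * Publish the current clocksource parameters and time of day to the data
 * block read by the light-weight fsyscall gettimeofday path; readers use
 * the seqlock to retry if they race with an update.
 */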
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = c->mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}