/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"
static cycle_t itc_get_cycles(void);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0;	/* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif
static struct clocksource clocksource_itc = {
	.name		= "itc",
	.rating		= 350,
	.read		= itc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 0,	/* to be calculated */
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;
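/*
 * Note on .mult/.shift: the generic clocksource code converts ITC cycles
 * to nanoseconds as (cycles * mult) >> shift, so once .mult is filled in
 * (see ia64_init_itm() below) a delta of d cycles accounts for roughly
 * d * 10^9 / itc_freq nanoseconds.
 */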
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(next);
	cputime_t delta_stime, delta_utime;
	__u64 now;

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
	account_system_time(prev, 0, delta_stime);
	account_system_time_scaled(prev, delta_stime);

	if (pi->ac_utime) {
		delta_utime = cycle_to_cputime(pi->ac_utime);
		account_user_time(prev, delta_utime);
		account_user_time_scaled(prev, delta_utime);
	}

	pi->ac_stamp = ni->ac_stamp = now;
	ni->ac_stime = ni->ac_utime = 0;
}
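/*
 * Worked example of the scheme above: ac_stamp records the ITC value at
 * the last accounting point. If prev has run 3000 cycles in the kernel
 * since ac_stamp and had already banked 120000 cycles of user time in
 * ac_utime, the switch charges cycle_to_cputime(ac_stime + 3000) of
 * system time and cycle_to_cputime(120000) of user time to prev, then
 * restamps both threads so that next starts accounting from "now".
 */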
/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned long flags;
	cputime_t delta_stime;
	__u64 now;

	local_irq_save(flags);

	now = ia64_get_itc();

	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
	account_system_time(tsk, 0, delta_stime);
	account_system_time_scaled(tsk, delta_stime);
	ti->ac_stime = 0;

	ti->ac_stamp = now;

	local_irq_restore(flags);
}
/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	struct thread_info *ti = task_thread_info(p);
	cputime_t delta_utime;

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(p, delta_utime);
		account_user_time_scaled(p, delta_utime);
		ti->ac_utime = 0;
	}
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id) {
			/*
			 * Here we are in the timer irq handler. We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU. We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(1);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
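/*
 * Example of the "drop ticks" logic above: if handling overran by 1.6
 * tick periods, the inner while loop advances new_itm until it lies at
 * least itm_delta/2 cycles beyond the current ITC value, so the next
 * interrupt is programmed with a comfortable margin and the skipped
 * periods are simply never delivered rather than replayed in a burst.
 */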
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
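	/*
	 * With hi the largest power of two <= cpu, the formula above places
	 * the ticks at odd multiples of delta/(2*hi): CPU 1 fires delta/2
	 * after CPU 0, CPUs 2 and 3 at delta/4 and 3*delta/4, CPUs 4-7 at
	 * delta/8, 3*delta/8, 5*delta/8 and 7*delta/8, spreading the
	 * per-CPU ticks evenly across one tick period.
	 */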
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);
void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;
	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n",
		       ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */
	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
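	/*
	 * Worked example (hypothetical numbers): a 200MHz platform base
	 * clock with an ITC ratio of 15/2 gives itc_freq = 1.5GHz; with
	 * HZ == 1024 the rounded tick interval is
	 * (1500000000 + 512) / 1024 = 1464844 ITC cycles per tick.
	 */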
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}
	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
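	/*
	 * nsec_per_cyc is a fixed-point reciprocal: it stores
	 * round(NSEC_PER_SEC * 2^IA64_NSEC_PER_CYC_SHIFT / itc_freq), so a
	 * cycle count c converts to nanoseconds as
	 * (c * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT without a division.
	 */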
	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off! There is no point in
		 * synchronizing ITCs since there may be large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;
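	/*
	 * Clocksource ratings (see include/linux/clocksource.h): 300-399
	 * marks a desired, fast and accurate source, while 1-99 marks a
	 * source fit only for boot-up and testing; that is why the default
	 * rating of 350 is demoted to 50 on drifty hardware.
	 */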
	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}
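/*
 * clocksource_hz2mult(hz, shift) computes roughly
 * (NSEC_PER_SEC << shift) / hz. For the hypothetical 1.5GHz ITC in the
 * example above and shift == 16 this gives about
 * (10^9 * 65536) / 1.5e9 ~= 43690, so the core code converts a delta of
 * d cycles to (d * 43690) >> 16 nanoseconds.
 */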
static cycle_t itc_get_cycles(void)
{
	u64 lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment you can lose the cmpxchg race; if so,
	 * cmpxchg returns the value that the winner of the contention
	 * published. Use that new value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}
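/*
 * The cmpxchg above implements a lock-free monotonic clamp. A minimal
 * sketch of the equivalent logic (names hypothetical, not kernel API):
 *
 *	u64 last = shared_last, cur = read_counter();
 *	if (last && last > cur)
 *		return last;		// another CPU is slightly ahead
 *	prev = cmpxchg(&shared_last, last, cur);
 *	return (prev == last) ? cur : prev;
 *
 * Readers therefore never observe time moving backwards even when the
 * per-CPU ITCs are offset from each other by a few ticks.
 */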
static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};
void __devinit ia64_disable_timer(void)
{
	/* mask the timer interrupt by setting the mask bit (bit 16) in ITV */
	ia64_set_itv(1 << 16);
}
void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	efi_gettimeofday(&xtime);
	ia64_init_itm();

	/*
	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
	 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
	 */
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
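/*
 * Example of the normalization above: if EFI reports xtime as
 * { .tv_sec = 1200000000, .tv_nsec = 250000000 }, then wall_to_monotonic
 * becomes { .tv_sec = -1200000001, .tv_nsec = 750000000 }; adding the two
 * yields exactly zero, with tv_nsec kept in [0, NSEC_PER_SEC).
 */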
/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);
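/*
 * E.g. udelay(10) on a CPU with cyc_per_usec == 1500 busy-waits until
 * the ITC has advanced by 15000 cycles; the indirection through the
 * ia64_udelay function pointer lets platform code substitute its own
 * delay implementation.
 */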
/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = c->mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
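/*
 * The seqlock write above pairs with a lockless read loop in the fast
 * system call path. A minimal sketch of the reader side (pseudocode for
 * the fsys.S assembly, not an actual kernel function):
 *
 *	do {
 *		seq   = read_seqbegin(&fsyscall_gtod_data.lock);
 *		cycle = clk_read() - fsyscall_gtod_data.clk_cycle_last;
 *		ns    = (cycle * fsyscall_gtod_data.clk_mult)
 *				>> fsyscall_gtod_data.clk_shift;
 *		ts    = fsyscall_gtod_data.wall_time;
 *	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));
 *	timespec_add_ns(&ts, ns);
 *
 * A retry only happens if this writer ran concurrently, so readers never
 * see a half-updated snapshot.
 */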