/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/timekeeper_internal.h>
#include <linux/platform_device.h>
#include <linux/cputime.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>

#include "fsyscall_gtod_data.h"

static u64 itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif
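
/*
 * The ITC clocksource.  A rating of 350 marks it as a high-quality
 * source; ia64_init_itm() below lowers it to 50 on platforms whose
 * ITCs drift, leaving it useful only for boot and testing.
 */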
static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

#include <linux/kernel_stat.h>

extern u64 cycle_to_nsec(u64 cyc);
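
/*
 * Flush the raw ITC cycle counts accumulated in thread_info into the
 * generic cputime accounting core, converting each bucket from cycles
 * to nanoseconds, then reset the per-thread accumulators.
 */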
void vtime_flush(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 delta;

	if (ti->utime)
		account_user_time(tsk, cycle_to_nsec(ti->utime));

	if (ti->gtime)
		account_guest_time(tsk, cycle_to_nsec(ti->gtime));

	if (ti->idle_time)
		account_idle_time(cycle_to_nsec(ti->idle_time));

	if (ti->stime) {
		delta = cycle_to_nsec(ti->stime);
		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
	}

	if (ti->hardirq_time) {
		delta = cycle_to_nsec(ti->hardirq_time);
		account_system_index_time(tsk, delta, CPUTIME_IRQ);
	}

	if (ti->softirq_time) {
		delta = cycle_to_nsec(ti->softirq_time);
		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
	}

	ti->utime = 0;
	ti->gtime = 0;
	ti->idle_time = 0;
	ti->stime = 0;
	ti->hardirq_time = 0;
	ti->softirq_time = 0;
}

/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void arch_vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);

	ni->ac_stamp = pi->ac_stamp;
	ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function must be called with interrupts disabled
 * (enforced by the WARN_ON_ONCE() below).
 */
static __u64 vtime_delta(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 now, delta_stime;

	WARN_ON_ONCE(!irqs_disabled());

	now = ia64_get_itc();
	delta_stime = now - ti->ac_stamp;
	ti->ac_stamp = now;

	return delta_stime;
}
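
/*
 * Charge the cycles elapsed since the last snapshot to the bucket
 * matching the current context: guest, hard irq, soft irq or plain
 * system time.  vtime_flush() later converts the buckets to
 * nanoseconds and reports them to the scheduler.
 */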
void vtime_account_system(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 stime = vtime_delta(tsk);

	if ((tsk->flags & PF_VCPU) && !irq_count())
		ti->gtime += stime;
	else if (hardirq_count())
		ti->hardirq_time += stime;
	else if (in_serving_softirq())
		ti->softirq_time += stime;
	else
		ti->stime += stime;
}
EXPORT_SYMBOL_GPL(vtime_account_system);

void vtime_account_idle(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);

	ti->idle_time += vtime_delta(tsk);
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
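
/*
 * Per-CPU timer tick handler: acknowledge the tick, charge process
 * time, let the time-keeper CPU advance xtime, and program the ITM
 * for the next tick, dropping ticks that are already in the past.
 */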
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing the monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
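	/*
	 * Worked example of the formula below: for cpu 5,
	 * hi = 1 << ia64_fls(5) = 4, so
	 * shift = (2*(5-4) + 1) * delta/4/2 = 3*delta/8.
	 * CPUs 4..7 thus fire at the odd multiples 1, 3, 5, 7
	 * of delta/8 past the base tick.
	 */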
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}
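
/*
 * The "nojitter" boot option disables the cmpxchg-based jitter
 * compensation in itc_get_cycles() below.  This speeds up the time
 * retrieval syscalls, at the risk of time appearing to jitter (or
 * even step backward) between CPUs whose ITC offsets are too large;
 * see the comment in ia64_init_itm().
 */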
static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);
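
/*
 * Determine the platform base frequency via SAL and the ITC ratio via
 * PAL, derive the per-CPU tick interval and conversion factors, start
 * the local timer tick, and register the ITC clocksource on first use.
 */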
void ia64_init_itm(void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
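	/*
	 * nsec_per_cyc is a rounded fixed-point value: nanoseconds per
	 * ITC cycle scaled by 2^IA64_NSEC_PER_CYC_SHIFT.  For example
	 * (illustrative numbers, not from the source): at an itc_freq
	 * of 400 MHz the true ratio is 2.5 ns/cycle, stored as
	 * 2.5 * 2^IA64_NSEC_PER_CYC_SHIFT rounded to nearest.
	 */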

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing.  If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option.  However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers.  Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off!  There is no point in
		 * synchronizing ITCs since there may be large differentials
		 * that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs.  Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	/* avoid softlockup message when a cpu is unplugged and plugged again */
	touch_softlockup_watchdog();

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		clocksource_register_hz(&clocksource_itc,
					local_cpu_data->itc_freq);
		itc_clocksource = &clocksource_itc;
	}
}
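
/*
 * Clocksource read callback.  Without jitter compensation this is a
 * plain ITC read; with it, a cmpxchg loop guarantees that the value
 * returned never goes backward relative to what another CPU has
 * already returned, even if the per-CPU ITCs are slightly out of sync.
 */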
static u64 itc_get_cycles(struct clocksource *cs)
{
	unsigned long lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, we may lose the cmpxchg race;
	 * if so, cmpxchg returns the value that the winner of the
	 * contention stored, so use that value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_IRQPOLL,
	.name =		"timer"
};

void read_persistent_clock64(struct timespec64 *ts)
{
	efi_gettimeofday(ts);
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	ia64_init_itm();
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}
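
/*
 * udelay() goes through a function pointer, presumably so that
 * platform code can install its own delay implementation where the
 * ITC cannot be used directly; ia64_itc_udelay is the default.
 */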
void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}
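
/*
 * Push the timekeeper's current view of time into fsyscall_gtod_data,
 * the snapshot read by the user-space fast gettimeofday path.  The
 * seqcount write section lets readers detect and retry across a torn
 * update.
 */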
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
			 struct clocksource *c, u32 mult, u64 cycle_last)
{
	write_seqcount_begin(&fsyscall_gtod_data.seq);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
						   + wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
						    + wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_seqcount_end(&fsyscall_gtod_data.seq);
}