drivers/char/mmtimer.c
/*
 * Timer device implementation for SGI SN platforms.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved.
 *
 * This driver exports an API that should be supportable by any HPET or IA-PC
 * multimedia timer. The code below is currently specific to the SGI Altix
 * SHub RTC, however.
 *
 * 11/01/01 - jbarnes - initial revision
 * 9/10/04 - Christoph Lameter - remove interrupt support for kernel inclusion
 * 10/1/04 - Christoph Lameter - provide posix clock CLOCK_SGI_CYCLE
 * 10/13/04 - Christoph Lameter, Dimitri Sivanich - provide timer interrupt
 *		support via the posix timer interface
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mmtimer.h>
#include <linux/miscdevice.h>
#include <linux/posix-timers.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/shubio.h>

MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("SGI Altix RTC Timer");
MODULE_LICENSE("GPL");

/* name of the device, usually in /dev */
#define MMTIMER_NAME "mmtimer"
#define MMTIMER_DESC "SGI Altix RTC Timer"
#define MMTIMER_VERSION "2.1"

#define RTC_BITS 55 /* 55 bits for this implementation */

extern unsigned long sn_rtc_cycles_per_second;

#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))

#define rtc_time() (*RTC_COUNTER_ADDR)

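/*
 * Reading the counter twice gives elapsed time directly. A rough,
 * illustrative sketch only (it ignores the 55-bit wraparound and
 * multiplication overflow, which real users must handle):
 *
 *	unsigned long t0 = rtc_time();
 *	...
 *	unsigned long nsec = (rtc_time() - t0) * NSEC_PER_SEC /
 *				sn_rtc_cycles_per_second;
 */
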
static DEFINE_MUTEX(mmtimer_mutex);
static long mmtimer_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg);
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma);

/*
 * Period in femtoseconds (10^-15 s)
 */
static unsigned long mmtimer_femtoperiod = 0;

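/*
 * Illustrative numbers only: a hypothetical 25 MHz RTC would give a period
 * of 10^15 / 25,000,000 = 40,000,000 femtoseconds per cycle (the rounded
 * computation is done in mmtimer_init()).
 */
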
static const struct file_operations mmtimer_fops = {
	.owner = THIS_MODULE,
	.mmap =	mmtimer_mmap,
	.unlocked_ioctl = mmtimer_ioctl,
	.llseek = noop_llseek,
};

/*
 * We only have comparison registers RTC1-4 currently available per
 * node. RTC0 is used by SAL.
 */
/* Check for an RTC interrupt pending */
static int mmtimer_int_pending(int comparator)
{
	if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
			SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
		return 1;
	else
		return 0;
}

/* Clear the RTC interrupt pending bit */
static void mmtimer_clr_int_pending(int comparator)
{
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
		SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
}

/* Setup timer on comparator RTC1 */
static void mmtimer_setup_int_0(int cpu, u64 expires)
{
	u64 val;

	/* Disable interrupt */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 0UL);

	/* Initialize comparator value */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), -1L);

	/* Clear pending bit */
	mmtimer_clr_int_pending(0);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC1_INT_CONFIG_PID_SHFT);

	/* Set configuration */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_CONFIG), val);

	/* Enable RTC interrupts */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE), 1UL);

	/* Initialize comparator value */
	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPB), expires);
}

/* Setup timer on comparator RTC2 */
static void mmtimer_setup_int_1(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), -1L);

	mmtimer_clr_int_pending(1);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC2_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPC), expires);
}

/* Setup timer on comparator RTC3 */
static void mmtimer_setup_int_2(int cpu, u64 expires)
{
	u64 val;

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 0UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), -1L);

	mmtimer_clr_int_pending(2);

	val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
		((u64)cpu_physical_id(cpu) <<
			SH_RTC3_INT_CONFIG_PID_SHFT);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE), 1UL);

	HUB_S((u64 *)LOCAL_MMR_ADDR(SH_INT_CMPD), expires);
}

/*
 * This function must be called with interrupts disabled and preemption off
 * in order to ensure that the setup succeeds in a deterministic time frame.
 * It will check if the interrupt setup succeeded.
 */
static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
{

	switch (comparator) {
	case 0:
		mmtimer_setup_int_0(cpu, expires);
		break;
	case 1:
		mmtimer_setup_int_1(cpu, expires);
		break;
	case 2:
		mmtimer_setup_int_2(cpu, expires);
		break;
	}
	/* We might've missed our expiration time */
	if (rtc_time() <= expires)
		return 1;

	/*
	 * If an interrupt is already pending then it's okay;
	 * if not, then we failed.
	 */
	return mmtimer_int_pending(comparator);
}

static int mmtimer_disable_int(long nasid, int comparator)
{
	switch (comparator) {
	case 0:
		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC1_INT_ENABLE),
			0UL) : REMOTE_HUB_S(nasid, SH_RTC1_INT_ENABLE, 0UL);
		break;
	case 1:
		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_ENABLE),
			0UL) : REMOTE_HUB_S(nasid, SH_RTC2_INT_ENABLE, 0UL);
		break;
	case 2:
		nasid == -1 ? HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_ENABLE),
			0UL) : REMOTE_HUB_S(nasid, SH_RTC3_INT_ENABLE, 0UL);
		break;
	default:
		return -EFAULT;
	}
	return 0;
}

#define COMPARATOR	1	/* The comparator to use */

#define TIMER_OFF	0xbadcabLL	/* Timer is not setup */
#define TIMER_SET	0	/* Comparator is set for this timer */

/* There is one of these for each timer */
struct mmtimer {
	struct rb_node list;
	struct k_itimer *timer;
	int cpu;
};

struct mmtimer_node {
	spinlock_t lock ____cacheline_aligned;
	struct rb_root timer_head;
	struct rb_node *next;
	struct tasklet_struct tasklet;
};
static struct mmtimer_node *timers;


/*
 * Add a new mmtimer struct to the node's mmtimer list.
 * This function assumes the struct mmtimer_node is locked.
 */
static void mmtimer_add_list(struct mmtimer *n)
{
	int nodeid = n->timer->it.mmtimer.node;
	unsigned long expires = n->timer->it.mmtimer.expires;
	struct rb_node **link = &timers[nodeid].timer_head.rb_node;
	struct rb_node *parent = NULL;
	struct mmtimer *x;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		x = rb_entry(parent, struct mmtimer, list);

		if (expires < x->timer->it.mmtimer.expires)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &timers[nodeid].timer_head);

	if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
			struct mmtimer, list)->timer->it.mmtimer.expires)
		timers[nodeid].next = &n->list;
}

/*
 * Set the comparator for the next timer.
 * This function assumes the struct mmtimer_node is locked.
 */
static void mmtimer_set_next_timer(int nodeid)
{
	struct mmtimer_node *n = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	int o;

restart:
	if (n->next == NULL)
		return;

	x = rb_entry(n->next, struct mmtimer, list);
	t = x->timer;
	if (!t->it.mmtimer.incr) {
		/* Not an interval timer */
		if (!mmtimer_setup(x->cpu, COMPARATOR,
					t->it.mmtimer.expires)) {
			/* Late setup, fire now */
			tasklet_schedule(&n->tasklet);
		}
		return;
	}

	/* Interval timer */
	o = 0;
	while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
		unsigned long e, e1;
		struct rb_node *next;
		t->it.mmtimer.expires += t->it.mmtimer.incr << o;
		t->it_overrun += 1 << o;
		o++;
		if (o > 20) {
			printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
			t->it.mmtimer.clock = TIMER_OFF;
			n->next = rb_next(&x->list);
			rb_erase(&x->list, &n->timer_head);
			kfree(x);
			goto restart;
		}

		e = t->it.mmtimer.expires;
		next = rb_next(&x->list);

		if (next == NULL)
			continue;

		e1 = rb_entry(next, struct mmtimer, list)->
			timer->it.mmtimer.expires;
		if (e > e1) {
			n->next = next;
			rb_erase(&x->list, &n->timer_head);
			mmtimer_add_list(x);
			goto restart;
		}
	}
}

/**
 * mmtimer_ioctl - ioctl interface for /dev/mmtimer
 * @file: file structure for the device
 * @cmd: command to execute
 * @arg: optional argument to command
 *
 * Executes the command specified by @cmd. Returns 0 for success, < 0 for
 * failure.
 *
 * Valid commands:
 *
 * %MMTIMER_GETOFFSET - Should return the offset (relative to the start
 * of the page where the registers are mapped) for the counter in question.
 *
 * %MMTIMER_GETRES - Returns the resolution of the clock in femto (10^-15)
 * seconds
 *
 * %MMTIMER_GETFREQ - Copies the frequency of the clock in Hz to the address
 * specified by @arg
 *
 * %MMTIMER_GETBITS - Returns the number of bits in the clock's counter
 *
 * %MMTIMER_MMAPAVAIL - Returns 1 if the registers can be mmap'd into userspace
 *
 * %MMTIMER_GETCOUNTER - Gets the current value in the counter and places it
 * in the address specified by @arg.
 */
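/*
 * A minimal userspace sketch of this ioctl interface (illustrative only,
 * not part of the driver; assumes <linux/mmtimer.h> provides the request
 * codes and that /dev/mmtimer exists):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmtimer.h>
 *
 *	unsigned long freq, counter;
 *	int fd = open("/dev/mmtimer", O_RDONLY);
 *	ioctl(fd, MMTIMER_GETFREQ, &freq);	   - clock frequency in Hz
 *	ioctl(fd, MMTIMER_GETCOUNTER, &counter);   - current counter value
 *	int bits = ioctl(fd, MMTIMER_GETBITS, 0);  - width of the counter
 */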
static long mmtimer_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int ret = 0;

	mutex_lock(&mmtimer_mutex);

	switch (cmd) {
	case MMTIMER_GETOFFSET:	/* offset of the counter */
		/*
		 * SN RTC registers are on their own 64k page
		 */
		if (PAGE_SIZE <= (1 << 16))
			ret = (((long)RTC_COUNTER_ADDR) & (PAGE_SIZE-1)) / 8;
		else
			ret = -ENOSYS;
		break;

	case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
		if (copy_to_user((unsigned long __user *)arg,
				&mmtimer_femtoperiod, sizeof(unsigned long)))
			ret = -EFAULT;
		break;

	case MMTIMER_GETFREQ: /* frequency in Hz */
		if (copy_to_user((unsigned long __user *)arg,
				&sn_rtc_cycles_per_second,
				sizeof(unsigned long)))
			ret = -EFAULT;
		break;

	case MMTIMER_GETBITS: /* number of bits in the clock */
		ret = RTC_BITS;
		break;

	case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
		ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
		break;

	case MMTIMER_GETCOUNTER:
		if (copy_to_user((unsigned long __user *)arg,
				RTC_COUNTER_ADDR, sizeof(unsigned long)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&mmtimer_mutex);
	return ret;
}

/**
 * mmtimer_mmap - maps the clock's registers into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 *
 * Calls remap_pfn_range() to map the clock's registers into
 * the calling process' address space.
 */
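/*
 * A hedged userspace sketch of this mapping (illustrative only, not part
 * of the driver): map one read-only page and locate the counter within it
 * with MMTIMER_GETOFFSET, which reports the offset in 8-byte words:
 *
 *	int fd = open("/dev/mmtimer", O_RDONLY);
 *	int off = ioctl(fd, MMTIMER_GETOFFSET, 0);
 *	volatile unsigned long *page =
 *		mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
 *	unsigned long now = page[off];
 */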
static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mmtimer_addr;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	if (PAGE_SIZE > (1 << 16))
		return -ENOSYS;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	mmtimer_addr = __pa(RTC_COUNTER_ADDR);
	mmtimer_addr &= ~(PAGE_SIZE - 1);
	mmtimer_addr &= 0xfffffffffffffffUL;

	if (remap_pfn_range(vma, vma->vm_start, mmtimer_addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "remap_pfn_range failed in mmtimer.c\n");
		return -EAGAIN;
	}

	return 0;
}

static struct miscdevice mmtimer_miscdev = {
	SGI_MMTIMER,
	MMTIMER_NAME,
	&mmtimer_fops
};

/*
 * Posix Timer Interface
 */

static struct timespec sgi_clock_offset;
static int sgi_clock_period;

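/*
 * A rough userspace sketch of this posix clock (illustrative only; it
 * assumes CLOCK_SGI_CYCLE is visible to the application, e.g. via the
 * system headers on an SN platform):
 *
 *	struct timespec res, now;
 *	clock_getres(CLOCK_SGI_CYCLE, &res);	- roughly one RTC cycle, in ns
 *	clock_gettime(CLOCK_SGI_CYCLE, &now);	- current RTC-based time
 *
 * Interval timers created with timer_create() against this clock are the
 * ones handled by sgi_timer_set()/sgi_timer_del() below.
 */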
static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
{
	u64 nsec;

	nsec = rtc_time() * sgi_clock_period
			+ sgi_clock_offset.tv_nsec;
	*tp = ns_to_timespec(nsec);
	tp->tv_sec += sgi_clock_offset.tv_sec;
	return 0;
}

static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
{

	u64 nsec;
	u32 rem;

	nsec = rtc_time() * sgi_clock_period;

	sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);

	if (rem <= tp->tv_nsec)
		sgi_clock_offset.tv_nsec = tp->tv_nsec - rem;
	else {
		sgi_clock_offset.tv_nsec = tp->tv_nsec + NSEC_PER_SEC - rem;
		sgi_clock_offset.tv_sec--;
	}
	return 0;
}

/**
 * mmtimer_interrupt - timer interrupt handler
 * @irq: irq received
 * @dev_id: device the irq came from
 *
 * Called when one of the comparators matches the counter. This
 * routine will send signals to processes that have requested
 * them.
 *
 * This interrupt is run in an interrupt context
 * by the SHUB. It is therefore safe to locally access SHub
 * registers.
 */
static irqreturn_t
mmtimer_interrupt(int irq, void *dev_id)
{
	unsigned long expires = 0;
	int result = IRQ_NONE;
	unsigned indx = cpu_to_node(smp_processor_id());
	struct mmtimer *base;

	spin_lock(&timers[indx].lock);
	base = rb_entry(timers[indx].next, struct mmtimer, list);
	if (base == NULL) {
		spin_unlock(&timers[indx].lock);
		return result;
	}

	if (base->cpu == smp_processor_id()) {
		if (base->timer)
			expires = base->timer->it.mmtimer.expires;
		/* expires test won't work with shared irqs */
		if ((mmtimer_int_pending(COMPARATOR) > 0) ||
			(expires && (expires <= rtc_time()))) {
			mmtimer_clr_int_pending(COMPARATOR);
			tasklet_schedule(&timers[indx].tasklet);
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&timers[indx].lock);
	return result;
}

static void mmtimer_tasklet(unsigned long data)
{
	int nodeid = data;
	struct mmtimer_node *mn = &timers[nodeid];
	struct mmtimer *x;
	struct k_itimer *t;
	unsigned long flags;

	/* Send signal and deal with periodic signals */
	spin_lock_irqsave(&mn->lock, flags);
	if (!mn->next)
		goto out;

	x = rb_entry(mn->next, struct mmtimer, list);
	t = x->timer;

	if (t->it.mmtimer.clock == TIMER_OFF)
		goto out;

	t->it_overrun = 0;

	mn->next = rb_next(&x->list);
	rb_erase(&x->list, &mn->timer_head);

	if (posix_timer_event(t, 0) != 0)
		t->it_overrun++;

	if (t->it.mmtimer.incr) {
		t->it.mmtimer.expires += t->it.mmtimer.incr;
		mmtimer_add_list(x);
	} else {
		/* Ensure we don't false trigger in mmtimer_interrupt */
		t->it.mmtimer.clock = TIMER_OFF;
		t->it.mmtimer.expires = 0;
		kfree(x);
	}
	/* Set comparator for next timer, if there is one */
	mmtimer_set_next_timer(nodeid);

	t->it_overrun_last = t->it_overrun;
out:
	spin_unlock_irqrestore(&mn->lock, flags);
}

static int sgi_timer_create(struct k_itimer *timer)
{
	/* Ensure that a newly created timer is off */
	timer->it.mmtimer.clock = TIMER_OFF;
	return 0;
}

/* This does not really delete a timer. It just ensures
 * that the timer is not active
 *
 * Assumption: it_lock is already held with irq's disabled
 */
static int sgi_timer_del(struct k_itimer *timr)
{
	cnodeid_t nodeid = timr->it.mmtimer.node;
	unsigned long irqflags;

	spin_lock_irqsave(&timers[nodeid].lock, irqflags);
	if (timr->it.mmtimer.clock != TIMER_OFF) {
		unsigned long expires = timr->it.mmtimer.expires;
		struct rb_node *n = timers[nodeid].timer_head.rb_node;
		struct mmtimer *uninitialized_var(t);
		int r = 0;

		timr->it.mmtimer.clock = TIMER_OFF;
		timr->it.mmtimer.expires = 0;

		while (n) {
			t = rb_entry(n, struct mmtimer, list);
			if (t->timer == timr)
				break;

			if (expires < t->timer->it.mmtimer.expires)
				n = n->rb_left;
			else
				n = n->rb_right;
		}

		if (!n) {
			spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
			return 0;
		}

		if (timers[nodeid].next == n) {
			timers[nodeid].next = rb_next(n);
			r = 1;
		}

		rb_erase(n, &timers[nodeid].timer_head);
		kfree(t);

		if (r) {
			mmtimer_disable_int(cnodeid_to_nasid(nodeid),
				COMPARATOR);
			mmtimer_set_next_timer(nodeid);
		}
	}
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
	return 0;
}

/* Assumption: it_lock is already held with irq's disabled */
static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{

	if (timr->it.mmtimer.clock == TIMER_OFF) {
		cur_setting->it_interval.tv_nsec = 0;
		cur_setting->it_interval.tv_sec = 0;
		cur_setting->it_value.tv_nsec = 0;
		cur_setting->it_value.tv_sec = 0;
		return;
	}

	cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
	cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
}


static int sgi_timer_set(struct k_itimer *timr, int flags,
	struct itimerspec * new_setting,
	struct itimerspec * old_setting)
{
	unsigned long when, period, irqflags;
	int err = 0;
	cnodeid_t nodeid;
	struct mmtimer *base;
	struct rb_node *n;

	if (old_setting)
		sgi_timer_get(timr, old_setting);

	sgi_timer_del(timr);
	when = timespec_to_ns(&new_setting->it_value);
	period = timespec_to_ns(&new_setting->it_interval);

	if (when == 0)
		/* Clear timer */
		return 0;

	base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
	if (base == NULL)
		return -ENOMEM;

	if (flags & TIMER_ABSTIME) {
		struct timespec n;
		unsigned long now;

		getnstimeofday(&n);
		now = timespec_to_ns(&n);
		if (when > now)
			when -= now;
		else
			/* Fire the timer immediately */
			when = 0;
	}

	/*
	 * Convert to sgi clock period. Need to keep rtc_time() as near as possible
	 * to getnstimeofday() in order to be as faithful as possible to the time
	 * specified.
	 */
	when = (when + sgi_clock_period - 1) / sgi_clock_period + rtc_time();
	period = (period + sgi_clock_period - 1) / sgi_clock_period;

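	/*
	 * Illustrative numbers only: with a hypothetical 50 MHz RTC,
	 * sgi_clock_period is 10^9 / 50,000,000 = 20 ns, so a 1 ms relative
	 * "when" rounds up to (1,000,000 + 19) / 20 = 50,000 ticks past the
	 * current rtc_time().
	 */
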
	/*
	 * We are allocating a local SHub comparator. If we would be moved to
	 * another cpu then another SHub may be local to us. Prohibit that by
	 * switching off preemption.
	 */
	preempt_disable();

	nodeid = cpu_to_node(smp_processor_id());

	/* Lock the node timer structure */
	spin_lock_irqsave(&timers[nodeid].lock, irqflags);

	base->timer = timr;
	base->cpu = smp_processor_id();

	timr->it.mmtimer.clock = TIMER_SET;
	timr->it.mmtimer.node = nodeid;
	timr->it.mmtimer.incr = period;
	timr->it.mmtimer.expires = when;

	n = timers[nodeid].next;

	/* Add the new struct mmtimer to node's timer list */
	mmtimer_add_list(base);

	if (timers[nodeid].next == n) {
		/* No need to reprogram comparator for now */
		spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
		preempt_enable();
		return err;
	}

	/* We need to reprogram the comparator */
	if (n)
		mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);

	mmtimer_set_next_timer(nodeid);

	/* Unlock the node timer structure */
	spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);

	preempt_enable();

	return err;
}

static struct k_clock sgi_clock = {
	.res = 0,
	.clock_set = sgi_clock_set,
	.clock_get = sgi_clock_get,
	.timer_create = sgi_timer_create,
	.nsleep = do_posix_clock_nonanosleep,
	.timer_set = sgi_timer_set,
	.timer_del = sgi_timer_del,
	.timer_get = sgi_timer_get
};

/**
 * mmtimer_init - device initialization routine
 *
 * Does initial setup for the mmtimer device.
 */
static int __init mmtimer_init(void)
{
	cnodeid_t node, maxn = -1;

	if (!ia64_platform_is("sn2"))
		return 0;

	/*
	 * Sanity check the cycles/sec variable
	 */
	if (sn_rtc_cycles_per_second < 100000) {
		printk(KERN_ERR "%s: unable to determine clock frequency\n",
		       MMTIMER_NAME);
		goto out1;
	}

	mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
			       2) / sn_rtc_cycles_per_second;

	if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
		printk(KERN_WARNING "%s: unable to allocate interrupt.\n",
			MMTIMER_NAME);
		goto out1;
	}

	if (misc_register(&mmtimer_miscdev)) {
		printk(KERN_ERR "%s: failed to register device\n",
		       MMTIMER_NAME);
		goto out2;
	}

	/* Get max numbered node, calculate slots needed */
	for_each_online_node(node) {
		maxn = node;
	}
	maxn++;

	/* Allocate list of node ptrs to mmtimer_t's */
	timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
	if (timers == NULL) {
		printk(KERN_ERR "%s: failed to allocate memory for device\n",
				MMTIMER_NAME);
		goto out3;
	}

	/* Initialize struct mmtimer's for each online node */
	for_each_online_node(node) {
		spin_lock_init(&timers[node].lock);
		tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
			(unsigned long) node);
	}

	sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
	register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);

	printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
	       sn_rtc_cycles_per_second/(unsigned long)1E6);

	return 0;

out3:
	kfree(timers);
	misc_deregister(&mmtimer_miscdev);
out2:
	free_irq(SGI_MMTIMER_VECTOR, NULL);
out1:
	return -1;
}

module_init(mmtimer_init);