/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>		/* readl/writel used by the MMIO accessors below */
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>
/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec.htm
 */
#define	HPET_USER_FREQ	(64)
#define	HPET_DRIFT	(500)

#define	HPET_RANGE_SIZE		1024	/* from HPET spec */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_task_lock);
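
/*
 * Note on ordering: the paths that need both locks (hpet_register() and
 * hpet_unregister() below) take hpet_task_lock first and then hpet_lock;
 * the interrupt handler only ever holds one of the two at a time.
 */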
#define	HPET_DEV_NAME	(7)
struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	struct hpet_task *hd_task;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};
struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct time_interpolator *hp_interpolator;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};
static struct hpets *hpets;
#define	HPET_OPEN		0x0001
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004
#define	HPET_SHARED_IRQ		0x0008
#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif
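
/*
 * The 32-bit fallbacks above issue two separate MMIO accesses per 64-bit
 * register, so they are not atomic with respect to a running counter.
 * The driver uses them on configuration/capability registers; the main
 * counter goes through read_counter()/write_counter(), which collapse to
 * single 32-bit accesses when BITS_PER_LONG is not 64.
 */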
static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_hpet->hpet_mc);
		write_counter(t + m + devp->hd_hpets->hp_delta,
			      &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	spin_lock(&hpet_task_lock);
	if (devp->hd_task)
		devp->hd_task->ht_func(devp->hd_task->ht_data);
	spin_unlock(&hpet_task_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}
static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN
			    || hpetp->hp_dev[i].hd_task)
				continue;
			else {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);

	return 0;
}
static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}
static unsigned int hpet_poll(struct file *file, poll_table *wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return POLLIN | POLLRDNORM;

	return 0;
}
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef	CONFIG_HPET_MMAP
	struct hpet_dev *devp;
	unsigned long addr;

	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
		return -EINVAL;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
			       PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
			__FUNCTION__);
		return -EAGAIN;
	}

	return 0;
#else
	return -ENOSYS;
#endif
}
static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp;

	devp = file->private_data;

	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
		return 0;
	else
		return -EIO;
}
static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	if (irq)
		free_irq(irq, devp);

	if (file->f_flags & FASYNC)
		hpet_fasync(-1, file, 0);

	file->private_data = NULL;
	return 0;
}
static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);

static int
hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	   unsigned long arg)
{
	struct hpet_dev *devp;

	devp = file->private_data;
	return hpet_ioctl_common(devp, cmd, arg, 0);
}
static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	if (!devp->hd_ireqfreq)
		return -EIO;

	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;

	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
		devp->hd_flags |= HPET_SHARED_IRQ;
	spin_unlock_irq(&hpet_lock);

	irq = devp->hd_hdwirq;

	if (irq) {
		unsigned long irq_flags;

		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
						? SA_SHIRQ : SA_INTERRUPT;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);
	g = v | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		write_counter(t, &timer->hpet_compare);
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
		v |= Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}
/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	m = hpets->hp_tick_freq + (dis >> 1);
	do_div(m, dis);
	return (unsigned long)m;
}
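
/*
 * Illustrative example (figures assumed, not read from hardware): with an
 * hp_tick_freq of 14318180 Hz, a request for 1000 interrupts per second
 * gives (14318180 + 500) / 1000 = 14318 main-counter ticks per interrupt;
 * adding half the divisor first makes this a rounded rather than a
 * truncated division.
 */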
static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		free_irq(devp->hd_irq, devp);
		devp->hd_irq = 0;
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			struct hpet_info info;

			if (devp->hd_ireqfreq)
				info.hi_ireqfreq =
				    hpet_time_div(hpetp, devp->hd_ireqfreq);
			else
				info.hi_ireqfreq = 0;
			info.hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info.hi_hpet = hpetp->hp_which;
			info.hi_timer = devp - hpetp->hp_dev;
			if (kernel)
				memcpy((void *)arg, &info, sizeof(info));
			else
				if (copy_to_user((void __user *)arg, &info,
						 sizeof(info)))
					err = -EFAULT;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if (!kernel && (arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}
static struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.ioctl = hpet_ioctl,
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};
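
/*
 * Illustrative user-space sketch (not part of the driver): one way a
 * program might exercise the character device wired up above, assuming
 * the usual /dev/hpet node and the ioctl commands from <linux/hpet.h>.
 * Error handling is omitted.
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hits;
 *
 *	ioctl(fd, HPET_IRQFREQ, 64);		request 64 interrupts/second
 *	ioctl(fd, HPET_IE_ON, 0);		arm the timer interrupt
 *	read(fd, &hits, sizeof(hits));		blocks until an interrupt
 *	ioctl(fd, HPET_IE_OFF, 0);
 *	close(fd);
 */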
static int hpet_is_known(struct hpet_data *hdp)
{
	struct hpets *hpetp;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
			return 1;

	return 0;
}
EXPORT_SYMBOL(hpet_alloc);
EXPORT_SYMBOL(hpet_register);
EXPORT_SYMBOL(hpet_unregister);
EXPORT_SYMBOL(hpet_control);
int hpet_register(struct hpet_task *tp, int periodic)
{
	unsigned int i;
	u64 mask;
	struct hpet_timer __iomem *timer;
	struct hpet_dev *devp;
	struct hpets *hpetp;

	switch (periodic) {
	case 1:
		mask = Tn_PER_INT_CAP_MASK;
		break;
	case 0:
		mask = 0;
		break;
	default:
		return -EINVAL;
	}

	tp->ht_opaque = NULL;

	spin_lock_irq(&hpet_task_lock);
	spin_lock(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
		     i < hpetp->hp_ntimer; i++, timer++) {
			if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
			    != mask)
				continue;

			devp = &hpetp->hp_dev[i];

			if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
				devp = NULL;
				continue;
			}

			tp->ht_opaque = devp;
			devp->hd_task = tp;
			break;
		}

	spin_unlock(&hpet_lock);
	spin_unlock_irq(&hpet_task_lock);

	if (tp->ht_opaque)
		return 0;
	else
		return -EBUSY;
}
static inline int hpet_tpcheck(struct hpet_task *tp)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;

	devp = tp->ht_opaque;

	if (!devp)
		return -ENXIO;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (devp >= hpetp->hp_dev
		    && devp < (hpetp->hp_dev + hpetp->hp_ntimer)
		    && devp->hd_hpet == hpetp->hp_hpet)
			return 0;

	return -ENXIO;
}
int hpet_unregister(struct hpet_task *tp)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int err;

	if ((err = hpet_tpcheck(tp)))
		return err;

	spin_lock_irq(&hpet_task_lock);
	spin_lock(&hpet_lock);

	devp = tp->ht_opaque;
	if (devp->hd_task != tp) {
		spin_unlock(&hpet_lock);
		spin_unlock_irq(&hpet_task_lock);
		return -ENXIO;
	}

	timer = devp->hd_timer;
	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);
	devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC);
	devp->hd_task = NULL;
	spin_unlock(&hpet_lock);
	spin_unlock_irq(&hpet_task_lock);

	return 0;
}
int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
{
	struct hpet_dev *devp;
	int err;

	if ((err = hpet_tpcheck(tp)))
		return err;

	spin_lock_irq(&hpet_lock);
	devp = tp->ht_opaque;
	if (devp->hd_task != tp) {
		spin_unlock_irq(&hpet_lock);
		return -ENXIO;
	}
	spin_unlock_irq(&hpet_lock);
	return hpet_ioctl_common(devp, cmd, arg, 1);
}
static ctl_table hpet_table[] = {
	{
	 .procname = "max-user-freq",
	 .data = &hpet_max_freq,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec,
	 },
	{}
};

static ctl_table hpet_root[] = {
	{
	 .procname = "hpet",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_table,
	 },
	{}
};

static ctl_table dev_root[] = {
	{
	 .ctl_name = CTL_DEV,
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_root,
	 },
	{}
};

static struct ctl_table_header *sysctl_header;
static void hpet_register_interpolator(struct hpets *hpetp)
{
#ifdef	CONFIG_TIME_INTERPOLATION
	struct time_interpolator *ti;

	ti = kzalloc(sizeof(*ti), GFP_KERNEL);
	if (!ti)
		return;

	ti->source = TIME_SOURCE_MMIO64;
	ti->addr = &hpetp->hp_hpet->hpet_mc;
	ti->frequency = hpetp->hp_tick_freq;
	ti->drift = HPET_DRIFT;

	hpetp->hp_interpolator = ti;
	register_time_interpolator(ti);
#endif
}
/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define	TICK_CALIBRATE	(1000UL)
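
/*
 * TICK_CALIBRATE only sets the length of the measurement below:
 * hpet_calibrate() spins for hpet_time_div(hpetp, TICK_CALIBRATE)
 * main-counter ticks (about a millisecond at 1000UL) while repeatedly
 * re-arming a free comparator, and averages the ticks consumed per re-arm.
 */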
static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}
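
/*
 * The average computed above is stored in hp_delta by hpet_alloc() and
 * added into every comparator write (t + m + hp_delta), so that the
 * programmed deadline has not already been passed by the running main
 * counter by the time the write reaches the hardware.
 */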
int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last = NULL;
	unsigned long period;
	unsigned long long temp;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet that
	 * ACPI has also reported, then we catch it here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__FUNCTION__);
		return 0;
	}

	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kzalloc(siz, GFP_KERNEL);
	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
	    HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */
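
	/*
	 * Illustrative example (assumed figure): a period field of
	 * 69841279 fs, the common 14.31818 MHz HPET clock, yields
	 * (10^15 + 69841279/2) / 69841279 = 14318180 ticks per second.
	 */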
	printk(KERN_INFO "hpet%d: at MMIO 0x%lx (virtual 0x%p), IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address, hdp->hd_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk("\n");

	printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
		hpetp->hp_which, hpetp->hp_ntimer,
		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, hpetp->hp_tick_freq);

	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * then make timer unavailable for opens.
		 */
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);
	hpet_register_interpolator(hpetp);

	return 0;
}
static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		hdp->hd_phys_address = addr.minimum;
		hdp->hd_address = ioremap(addr.minimum, addr.address_length);

		if (hpet_is_known(hdp)) {
			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
				__FUNCTION__, hdp->hd_phys_address);
			iounmap(hdp->hd_address);
			return -EBUSY;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32;

		fixmem32 = &res->data.fixed_memory32;
		if (!fixmem32)
			return -EINVAL;

		hdp->hd_phys_address = fixmem32->address;
		hdp->hd_address = ioremap(fixmem32->address,
						HPET_RANGE_SIZE);

		if (hpet_is_known(hdp)) {
			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
				__FUNCTION__, hdp->hd_phys_address);
			iounmap(hdp->hd_address);
			return -EBUSY;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *irqp;
		int i, irq;

		irqp = &res->data.extended_irq;

		for (i = 0; i < irqp->interrupt_count; i++) {
			irq = acpi_register_gsi(irqp->interrupts[i],
					irqp->triggering, irqp->polarity);
			if (irq < 0)
				return AE_ERROR;

			hdp->hd_irq[hdp->hd_nirqs] = irq;
			hdp->hd_nirqs++;
		}
	}

	return AE_OK;
}
static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result =
	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		printk("%s: no address or irqs in _CRS\n", __FUNCTION__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}
static int hpet_acpi_remove(struct acpi_device *device, int type)
{
	/* XXX need to unregister interpolator, dealloc mem, etc */
	return -EINVAL;
}
static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = "PNP0103",
	.ops = {
		.add = hpet_acpi_add,
		.remove = hpet_acpi_remove,
		},
};
static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
static int __init hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root, 0);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}
static void __exit hpet_exit(void)
{
	acpi_bus_unregister_driver(&hpet_acpi_driver);

	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);

	return;
}
module_init(hpet_init);
module_exit(hpet_exit);

MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");