]>
Commit | Line | Data |
---|---|---|
1 | #include <linux/clocksource.h> | |
2 | #include <linux/clockchips.h> | |
3 | #include <linux/interrupt.h> | |
4 | #include <linux/sysdev.h> | |
5 | #include <linux/delay.h> | |
6 | #include <linux/errno.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/hpet.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/cpu.h> | |
11 | #include <linux/pm.h> | |
12 | #include <linux/io.h> | |
13 | ||
14 | #include <asm/fixmap.h> | |
15 | #include <asm/i8253.h> | |
16 | #include <asm/hpet.h> | |
17 | ||
/* The main counter is read as a 32 bit value (see read_hpet()) */
#define HPET_MASK CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC 1000000L

/* Bits kept in hpet_dev::flags (state and capabilities of a channel) */
#define HPET_DEV_USED_BIT 2
#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID 0x8
#define HPET_DEV_FSB_CAP 0x1000
#define HPET_DEV_PERI_CAP 0x2000

/*
 * Minimum distance (in HPET cycles) between the counter readout and
 * the programmed comparator value for the event to be considered safe;
 * see the big comment in hpet_next_event().
 */
#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/* Map a clock_event_device back to its containing hpet_dev */
#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
34 | ||
35 | /* | |
36 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | |
37 | */ | |
unsigned long hpet_address;	/* physical base address, set in acpi/boot.c */
u8 hpet_blockid;		/* OS timer block num */
u8 hpet_msi_disable;		/* non-zero: skip MSI timer setup (set elsewhere - TODO confirm) */

#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;	/* number of entries in hpet_devs[] */
#endif
static void __iomem *hpet_virt_address;	/* ioremapped base, see hpet_set_mapping() */
46 | ||
/*
 * Per-comparator state for the MSI based per-cpu clock event channels.
 */
struct hpet_dev {
	struct clock_event_device evt;
	unsigned int num;	/* HPET comparator number */
	int cpu;		/* cpu this channel is bound to */
	unsigned int irq;
	unsigned int flags;	/* HPET_DEV_* bits */
	char name[10];
};
55 | ||
/*
 * Raw 32 bit wide MMIO accessors for the HPET register space.
 * hpet_readl() is deliberately not static - it is used outside this
 * file as well (declared in a header - TODO confirm).
 */
inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}
65 | ||
66 | #ifdef CONFIG_X86_64 | |
67 | #include <asm/pgtable.h> | |
68 | #endif | |
69 | ||
/* Establish the uncached MMIO mapping of the HPET register block */
static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	/* Also map it into the fixmap so the vsyscall read path
	   (vread_hpet()) can access the counter */
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
#endif
}
77 | ||
/* Tear down the MMIO mapping again (error path of hpet_enable()) */
static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	/* NULL also makes hpet_late_init() bail out with -ENODEV */
	hpet_virt_address = NULL;
}
83 | ||
84 | /* | |
85 | * HPET command line enable / disable | |
86 | */ | |
static int boot_hpet_disable;	/* "nohpet" / "hpet=disable" on the command line */
int hpet_force_user;		/* "hpet=force"; consumed outside this file - TODO confirm */
static int hpet_verbose;	/* "hpet=verbose": dump register state at key points */
90 | ||
91 | static int __init hpet_setup(char *str) | |
92 | { | |
93 | if (str) { | |
94 | if (!strncmp("disable", str, 7)) | |
95 | boot_hpet_disable = 1; | |
96 | if (!strncmp("force", str, 5)) | |
97 | hpet_force_user = 1; | |
98 | if (!strncmp("verbose", str, 7)) | |
99 | hpet_verbose = 1; | |
100 | } | |
101 | return 1; | |
102 | } | |
103 | __setup("hpet=", hpet_setup); | |
104 | ||
/* "nohpet" boot parameter: same effect as "hpet=disable" */
static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);
111 | ||
/*
 * A HPET is usable when it was neither disabled on the command line
 * nor left undiscovered (hpet_address == 0).
 */
static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);
130 | ||
/*
 * Dump the global config/status registers, the main counter and all
 * per-comparator registers.  Only reached through the
 * hpet_print_config() wrapper, i.e. when booted with "hpet=verbose".
 */
static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;
	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	/* Number-of-timers field is 0 based */
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	/* 64 bit counter is read as two 32 bit halves */
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}
161 | ||
/*
 * Dump the full HPET register state when booted with "hpet=verbose".
 * Kept as a macro so that __func__/__LINE__ report the call site.
 * Note: __func__ is the standard C99 spelling; the old GCC-specific
 * __FUNCTION__ is deprecated.
 */
#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);		\
} while (0)
167 | ||
168 | /* | |
169 | * When the hpet driver (/dev/hpet) is enabled, we need to reserve | |
170 | * timer 0 and timer 1 in case of RTC emulation. | |
171 | */ | |
172 | #ifdef CONFIG_HPET | |
173 | ||
174 | static void hpet_reserve_msi_timers(struct hpet_data *hd); | |
175 | ||
/*
 * Hand the comparators which the kernel does not use itself over to
 * the /dev/hpet character device driver via hpet_alloc().
 */
static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	/* Number-of-timers field is 0 based */
	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet;
	hd.hd_nirqs = nrtimers;
	/* Timer 0 is the kernel tick channel (legacy clockevent) */
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	/* Timer 1 is used for the RTC emulation */
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ. Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	/* Report the irq routing capability of the remaining timers */
	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
213 | #else | |
214 | static void hpet_reserve_platform_timers(unsigned int id) { } | |
215 | #endif | |
216 | ||
217 | /* | |
218 | * Common hpet info | |
219 | */ | |
220 | static unsigned long hpet_freq; | |
221 | ||
222 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | |
223 | struct clock_event_device *evt); | |
224 | static int hpet_legacy_next_event(unsigned long delta, | |
225 | struct clock_event_device *evt); | |
226 | ||
/*
 * The hpet clock event device (legacy replacement mode, channel 0)
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.irq		= 0,
	/* low rating - per-cpu devices (rating 110 below) are preferred */
	.rating		= 50,
};
238 | ||
239 | static void hpet_stop_counter(void) | |
240 | { | |
241 | unsigned long cfg = hpet_readl(HPET_CFG); | |
242 | cfg &= ~HPET_CFG_ENABLE; | |
243 | hpet_writel(cfg, HPET_CFG); | |
244 | } | |
245 | ||
/*
 * Zero the 64 bit main counter (both 32 bit halves).  Callers stop
 * the counter first, see hpet_restart_counter().
 */
static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}
251 | ||
252 | static void hpet_start_counter(void) | |
253 | { | |
254 | unsigned int cfg = hpet_readl(HPET_CFG); | |
255 | cfg |= HPET_CFG_ENABLE; | |
256 | hpet_writel(cfg, HPET_CFG); | |
257 | } | |
258 | ||
/* Stop, zero and restart the main counter */
static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

/* Re-apply chipset quirks after resume (force_hpet_resume() is
   defined elsewhere - TODO confirm what it covers) */
static void hpet_resume_device(void)
{
	force_hpet_resume();
}

/* Clocksource resume callback: bring device and counter back up */
static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}
276 | ||
277 | static void hpet_enable_legacy_int(void) | |
278 | { | |
279 | unsigned int cfg = hpet_readl(HPET_CFG); | |
280 | ||
281 | cfg |= HPET_CFG_LEGACY; | |
282 | hpet_writel(cfg, HPET_CFG); | |
283 | hpet_legacy_int_enabled = 1; | |
284 | } | |
285 | ||
/*
 * Register HPET channel 0 (legacy replacement mode) as the global
 * tick clock event device.
 */
static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	/* 0x7FFFFFFF: largest positive delta of the 32 bit comparator */
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
301 | ||
302 | static int hpet_setup_msi_irq(unsigned int irq); | |
303 | ||
/*
 * Common mode switch for the legacy channel (timer 0) and the per-cpu
 * MSI channels.  @timer is the HPET comparator number backing @evt.
 */
static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		/* Convert the tick period (ns) into HPET cycles using
		   the clockevent mult/shift factors */
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		/* Make sure we use edge triggered interrupts */
		cfg &= ~HPET_TN_LEVEL;
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Just disable the comparator interrupt */
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			/* Re-establish the MSI routing and restore the
			   irq affinity of the per-cpu channel */
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}
365 | ||
/*
 * Program the comparator of @timer to fire @delta counter cycles from
 * now.  Returns -ETIME when the event may already have been missed so
 * the clockevents core retries; see the big comment below.
 */
static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on a equal comparison and neither provides a less
	 * than or equal functionality (which would require to take
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
402 | ||
/* Legacy (channel 0) wrappers around the common implementations */
static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}
414 | ||
415 | /* | |
416 | * HPET MSI Support | |
417 | */ | |
418 | #ifdef CONFIG_PCI_MSI | |
419 | ||
420 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | |
421 | static struct hpet_dev *hpet_devs; | |
422 | ||
423 | void hpet_msi_unmask(struct irq_data *data) | |
424 | { | |
425 | struct hpet_dev *hdev = data->handler_data; | |
426 | unsigned int cfg; | |
427 | ||
428 | /* unmask it */ | |
429 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | |
430 | cfg |= HPET_TN_FSB; | |
431 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | |
432 | } | |
433 | ||
434 | void hpet_msi_mask(struct irq_data *data) | |
435 | { | |
436 | struct hpet_dev *hdev = data->handler_data; | |
437 | unsigned int cfg; | |
438 | ||
439 | /* mask it */ | |
440 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | |
441 | cfg &= ~HPET_TN_FSB; | |
442 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | |
443 | } | |
444 | ||
/* Store the MSI message (data word + address) in the channel's
   FSB route register pair */
void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

/* Read the MSI message back from the route register pair */
void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	/* the route register does not hold upper address bits */
	msg->address_hi = 0;
}
457 | ||
/* Per-cpu MSI wrappers: dispatch to the comparator backing @evt */
static void hpet_msi_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	return hpet_next_event(delta, evt, hdev->num);
}
471 | ||
/*
 * Wire @irq up as an MSI source of this HPET block.  On failure the
 * irq is destroyed again and -EINVAL returned.
 */
static int hpet_setup_msi_irq(unsigned int irq)
{
	if (arch_setup_hpet_msi(irq, hpet_blockid)) {
		destroy_irq(irq);
		return -EINVAL;
	}
	return 0;
}
480 | ||
/*
 * Allocate a fresh irq for @dev and set it up as an MSI source.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq;

	/* create_irq_nr() arguments: from, node - TODO confirm semantics */
	irq = create_irq_nr(0, -1);
	if (!irq)
		return -EINVAL;

	irq_set_handler_data(irq, dev);

	/* on failure hpet_setup_msi_irq() already destroyed the irq */
	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}
497 | ||
498 | static irqreturn_t hpet_interrupt_handler(int irq, void *data) | |
499 | { | |
500 | struct hpet_dev *dev = (struct hpet_dev *)data; | |
501 | struct clock_event_device *hevt = &dev->evt; | |
502 | ||
503 | if (!hevt->event_handler) { | |
504 | printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n", | |
505 | dev->num); | |
506 | return IRQ_HANDLED; | |
507 | } | |
508 | ||
509 | hevt->event_handler(hevt); | |
510 | return IRQ_HANDLED; | |
511 | } | |
512 | ||
/*
 * Request the MSI irq of @dev and pin its affinity to the cpu this
 * channel belongs to.  Returns 0 on success, -1 when request_irq()
 * fails.
 */
static int hpet_setup_irq(struct hpet_dev *dev)
{

	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	/* presumably masked so the affinity change cannot race with
	   an in-flight interrupt - TODO confirm */
	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
	       dev->name, dev->irq);

	return 0;
}
530 | ||
/* This should be called on @cpu itself (see the WARN_ON below). */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	/* higher rating than the legacy hpet_clockevent (50) */
	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	/* 0x7FFFFFFF: largest positive delta of the 32 bit comparator */
	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}
561 | ||
562 | #ifdef CONFIG_HPET | |
563 | /* Reserve at least one timer for userspace (/dev/hpet) */ | |
564 | #define RESERVE_TIMERS 1 | |
565 | #else | |
566 | #define RESERVE_TIMERS 0 | |
567 | #endif | |
568 | ||
569 | static void hpet_msi_capability_lookup(unsigned int start_timer) | |
570 | { | |
571 | unsigned int id; | |
572 | unsigned int num_timers; | |
573 | unsigned int num_timers_used = 0; | |
574 | int i; | |
575 | ||
576 | if (hpet_msi_disable) | |
577 | return; | |
578 | ||
579 | if (boot_cpu_has(X86_FEATURE_ARAT)) | |
580 | return; | |
581 | id = hpet_readl(HPET_ID); | |
582 | ||
583 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); | |
584 | num_timers++; /* Value read out starts from 0 */ | |
585 | hpet_print_config(); | |
586 | ||
587 | hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); | |
588 | if (!hpet_devs) | |
589 | return; | |
590 | ||
591 | hpet_num_timers = num_timers; | |
592 | ||
593 | for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { | |
594 | struct hpet_dev *hdev = &hpet_devs[num_timers_used]; | |
595 | unsigned int cfg = hpet_readl(HPET_Tn_CFG(i)); | |
596 | ||
597 | /* Only consider HPET timer with MSI support */ | |
598 | if (!(cfg & HPET_TN_FSB_CAP)) | |
599 | continue; | |
600 | ||
601 | hdev->flags = 0; | |
602 | if (cfg & HPET_TN_PERIODIC_CAP) | |
603 | hdev->flags |= HPET_DEV_PERI_CAP; | |
604 | hdev->num = i; | |
605 | ||
606 | sprintf(hdev->name, "hpet%d", i); | |
607 | if (hpet_assign_irq(hdev)) | |
608 | continue; | |
609 | ||
610 | hdev->flags |= HPET_DEV_FSB_CAP; | |
611 | hdev->flags |= HPET_DEV_VALID; | |
612 | num_timers_used++; | |
613 | if (num_timers_used == num_possible_cpus()) | |
614 | break; | |
615 | } | |
616 | ||
617 | printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n", | |
618 | num_timers, num_timers_used); | |
619 | } | |
620 | ||
621 | #ifdef CONFIG_HPET | |
/*
 * Tell the /dev/hpet driver which comparators the kernel kept for
 * per-cpu MSI use, so it marks them reserved as well.
 */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
639 | #endif | |
640 | ||
/*
 * Claim a so far unused MSI capable channel.  The USED bit is set
 * with test_and_set_bit(), so concurrent callers cannot grab the
 * same channel twice.  Returns NULL when none is left.
 */
static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
				     (unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}
660 | ||
/* Work item plus completion used to set up a clockevent on a newly
   onlined cpu (scheduled from hpet_cpuhp_notify()) */
struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

/* Runs on the target cpu: grab a free channel and register it there */
static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	/* wake the waiter in hpet_cpuhp_notify() */
	complete(&hpet_work->complete);
}
680 | ||
/*
 * CPU hotplug callback: give an onlining cpu a per-cpu hpet channel
 * (synchronously, via a work item running on that cpu) and release
 * the channel of a dead cpu again.
 */
static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	/* mask off modifier bits (frozen etc. - TODO confirm) */
	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_timer_on_stack(&work.work.timer);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
707 | #else | |
708 | ||
/* !CONFIG_PCI_MSI: no per-cpu MSI channels, provide inert stubs */
static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}
730 | ||
731 | #endif | |
732 | ||
/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	/* only the lower 32 counter bits are used; matches HPET_MASK */
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

#ifdef CONFIG_X86_64
/* Userspace (vsyscall) counter read through the fixmap established
   in hpet_set_mapping(); 0xf0 is the main counter register offset */
static cycle_t __vsyscall_fn vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
#ifdef CONFIG_X86_64
	.vread		= vread_hpet,
#endif
};
759 | ||
/*
 * Start the counter, verify that it is actually counting (broken
 * BIOSes/chipsets may expose a dead HPET) and register the
 * clocksource.  Returns 0 on success, -ENODEV otherwise.
 */
static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
	return 0;
}
792 | ||
/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	unsigned long hpet_period;
	unsigned int id;
	u64 freq;
	int i;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);
	/*
	 * NOTE(review): the period is read before the SMM-ready wait
	 * loop below; on SB700-style SMM emulated HPETs this first
	 * read might still see 0xffffffff.  Verify the ordering.
	 */

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femto seconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	/* Only claim the tick when legacy replacement mode is available */
	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}
876 | ||
877 | /* | |
878 | * Needs to be late, as the reserve_timer code calls kalloc ! | |
879 | * | |
880 | * Not a problem on i386 as hpet_enable is called from late_time_init, | |
881 | * but on x86_64 it is necessary ! | |
882 | */ | |
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	/* Pick up a force-detected (quirk) HPET when ACPI did not
	   report one */
	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	/* With legacy replacement routing the first two comparators
	   are taken by the tick / RTC emulation */
	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	/* Hand out per-cpu channels to the cpus that are already up */
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);
925 | ||
/*
 * Shut the HPET down: drop the legacy interrupt routing (if we
 * enabled it) and stop the main counter.
 */
void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG);

		if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);
	}
}
939 | ||
940 | #ifdef CONFIG_HPET_EMULATE_RTC | |
941 | ||
942 | /* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET | |
943 | * is enabled, we support RTC interrupt functionality in software. | |
944 | * RTC has 3 kinds of interrupts: | |
945 | * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock | |
946 | * is updated | |
947 | * 2) Alarm Interrupt - generate an interrupt at a specific time of day | |
948 | * 3) Periodic Interrupt - generate periodic interrupt, with frequencies | |
949 | * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2) | |
950 | * (1) and (2) above are implemented using polling at a frequency of | |
951 | * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt | |
952 | * overhead. (DEFAULT_RTC_INT_FREQ) | |
953 | * For (3), we use interrupts at 64Hz or user specified periodic | |
954 | * frequency, whichever is higher. | |
955 | */ | |
956 | #include <linux/mc146818rtc.h> | |
957 | #include <linux/rtc.h> | |
958 | #include <asm/rtc.h> | |
959 | ||
/* Polling frequency for UIE/AIE emulation; 2^DEFAULT_RTC_SHIFT == 64 Hz */
#define DEFAULT_RTC_INT_FREQ 64
#define DEFAULT_RTC_SHIFT 6
#define RTC_NUM_INTS 1

static unsigned long hpet_rtc_flags;	/* emulated RTC_* irq enable bits (RTC_PIE used below) */
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;			/* last value written to the T1 comparator */
static u32 hpet_default_delta;		/* T1 delta for DEFAULT_RTC_INT_FREQ */
static u32 hpet_pie_delta;		/* T1 delta for the user periodic frequency */
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;	/* installed via hpet_register_irq_handler() */
974 | ||
975 | /* | |
976 | * Check that the hpet counter c1 is ahead of the c2 | |
977 | */ | |
978 | static inline int hpet_cnt_ahead(u32 c1, u32 c2) | |
979 | { | |
980 | return (s32)(c2 - c1) < 0; | |
981 | } | |
982 | ||
983 | /* | |
984 | * Registers a IRQ handler. | |
985 | */ | |
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;	/* only one consumer at a time */

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.  Also drops all emulated RTC interrupt enables.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
1012 | ||
1013 | /* | |
1014 | * Timer 1 for RTC emulation. We use one shot mode, as periodic mode | |
1015 | * is not supported by all HPET implementations for timer 1. | |
1016 | * | |
1017 | * hpet_rtc_timer_init() is called when the rtc is initialized. | |
1018 | */ | |
1019 | int hpet_rtc_timer_init(void) | |
1020 | { | |
1021 | unsigned int cfg, cnt, delta; | |
1022 | unsigned long flags; | |
1023 | ||
1024 | if (!is_hpet_enabled()) | |
1025 | return 0; | |
1026 | ||
1027 | if (!hpet_default_delta) { | |
1028 | uint64_t clc; | |
1029 | ||
1030 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; | |
1031 | clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT; | |
1032 | hpet_default_delta = clc; | |
1033 | } | |
1034 | ||
1035 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) | |
1036 | delta = hpet_default_delta; | |
1037 | else | |
1038 | delta = hpet_pie_delta; | |
1039 | ||
1040 | local_irq_save(flags); | |
1041 | ||
1042 | cnt = delta + hpet_readl(HPET_COUNTER); | |
1043 | hpet_writel(cnt, HPET_T1_CMP); | |
1044 | hpet_t1_cmp = cnt; | |
1045 | ||
1046 | cfg = hpet_readl(HPET_T1_CFG); | |
1047 | cfg &= ~HPET_TN_PERIODIC; | |
1048 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | |
1049 | hpet_writel(cfg, HPET_T1_CFG); | |
1050 | ||
1051 | local_irq_restore(flags); | |
1052 | ||
1053 | return 1; | |
1054 | } | |
1055 | EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); | |
1056 | ||
1057 | /* | |
1058 | * The functions below are called from rtc driver. | |
1059 | * Return 0 if HPET is not being used. | |
1060 | * Otherwise do the necessary changes and return 1. | |
1061 | */ | |
1062 | int hpet_mask_rtc_irq_bit(unsigned long bit_mask) | |
1063 | { | |
1064 | if (!is_hpet_enabled()) | |
1065 | return 0; | |
1066 | ||
1067 | hpet_rtc_flags &= ~bit_mask; | |
1068 | return 1; | |
1069 | } | |
1070 | EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); | |
1071 | ||
1072 | int hpet_set_rtc_irq_bit(unsigned long bit_mask) | |
1073 | { | |
1074 | unsigned long oldbits = hpet_rtc_flags; | |
1075 | ||
1076 | if (!is_hpet_enabled()) | |
1077 | return 0; | |
1078 | ||
1079 | hpet_rtc_flags |= bit_mask; | |
1080 | ||
1081 | if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) | |
1082 | hpet_prev_update_sec = -1; | |
1083 | ||
1084 | if (!oldbits) | |
1085 | hpet_rtc_timer_init(); | |
1086 | ||
1087 | return 1; | |
1088 | } | |
1089 | EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit); | |
1090 | ||
1091 | int hpet_set_alarm_time(unsigned char hrs, unsigned char min, | |
1092 | unsigned char sec) | |
1093 | { | |
1094 | if (!is_hpet_enabled()) | |
1095 | return 0; | |
1096 | ||
1097 | hpet_alarm_time.tm_hour = hrs; | |
1098 | hpet_alarm_time.tm_min = min; | |
1099 | hpet_alarm_time.tm_sec = sec; | |
1100 | ||
1101 | return 1; | |
1102 | } | |
1103 | EXPORT_SYMBOL_GPL(hpet_set_alarm_time); | |
1104 | ||
1105 | int hpet_set_periodic_freq(unsigned long freq) | |
1106 | { | |
1107 | uint64_t clc; | |
1108 | ||
1109 | if (!is_hpet_enabled()) | |
1110 | return 0; | |
1111 | ||
1112 | if (freq <= DEFAULT_RTC_INT_FREQ) | |
1113 | hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq; | |
1114 | else { | |
1115 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; | |
1116 | do_div(clc, freq); | |
1117 | clc >>= hpet_clockevent.shift; | |
1118 | hpet_pie_delta = clc; | |
1119 | hpet_pie_limit = 0; | |
1120 | } | |
1121 | return 1; | |
1122 | } | |
1123 | EXPORT_SYMBOL_GPL(hpet_set_periodic_freq); | |
1124 | ||
1125 | int hpet_rtc_dropped_irq(void) | |
1126 | { | |
1127 | return is_hpet_enabled(); | |
1128 | } | |
1129 | EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq); | |
1130 | ||
1131 | static void hpet_rtc_timer_reinit(void) | |
1132 | { | |
1133 | unsigned int cfg, delta; | |
1134 | int lost_ints = -1; | |
1135 | ||
1136 | if (unlikely(!hpet_rtc_flags)) { | |
1137 | cfg = hpet_readl(HPET_T1_CFG); | |
1138 | cfg &= ~HPET_TN_ENABLE; | |
1139 | hpet_writel(cfg, HPET_T1_CFG); | |
1140 | return; | |
1141 | } | |
1142 | ||
1143 | if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) | |
1144 | delta = hpet_default_delta; | |
1145 | else | |
1146 | delta = hpet_pie_delta; | |
1147 | ||
1148 | /* | |
1149 | * Increment the comparator value until we are ahead of the | |
1150 | * current count. | |
1151 | */ | |
1152 | do { | |
1153 | hpet_t1_cmp += delta; | |
1154 | hpet_writel(hpet_t1_cmp, HPET_T1_CMP); | |
1155 | lost_ints++; | |
1156 | } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER))); | |
1157 | ||
1158 | if (lost_ints) { | |
1159 | if (hpet_rtc_flags & RTC_PIE) | |
1160 | hpet_pie_count += lost_ints; | |
1161 | if (printk_ratelimit()) | |
1162 | printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n", | |
1163 | lost_ints); | |
1164 | } | |
1165 | } | |
1166 | ||
1167 | irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | |
1168 | { | |
1169 | struct rtc_time curr_time; | |
1170 | unsigned long rtc_int_flag = 0; | |
1171 | ||
1172 | hpet_rtc_timer_reinit(); | |
1173 | memset(&curr_time, 0, sizeof(struct rtc_time)); | |
1174 | ||
1175 | if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) | |
1176 | get_rtc_time(&curr_time); | |
1177 | ||
1178 | if (hpet_rtc_flags & RTC_UIE && | |
1179 | curr_time.tm_sec != hpet_prev_update_sec) { | |
1180 | if (hpet_prev_update_sec >= 0) | |
1181 | rtc_int_flag = RTC_UF; | |
1182 | hpet_prev_update_sec = curr_time.tm_sec; | |
1183 | } | |
1184 | ||
1185 | if (hpet_rtc_flags & RTC_PIE && | |
1186 | ++hpet_pie_count >= hpet_pie_limit) { | |
1187 | rtc_int_flag |= RTC_PF; | |
1188 | hpet_pie_count = 0; | |
1189 | } | |
1190 | ||
1191 | if (hpet_rtc_flags & RTC_AIE && | |
1192 | (curr_time.tm_sec == hpet_alarm_time.tm_sec) && | |
1193 | (curr_time.tm_min == hpet_alarm_time.tm_min) && | |
1194 | (curr_time.tm_hour == hpet_alarm_time.tm_hour)) | |
1195 | rtc_int_flag |= RTC_AF; | |
1196 | ||
1197 | if (rtc_int_flag) { | |
1198 | rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); | |
1199 | if (irq_handler) | |
1200 | irq_handler(rtc_int_flag, dev_id); | |
1201 | } | |
1202 | return IRQ_HANDLED; | |
1203 | } | |
1204 | EXPORT_SYMBOL_GPL(hpet_rtc_interrupt); | |
1205 | #endif |