/*
 * HPET (High Precision Event Timer) support.
 */
1 | #include <linux/clocksource.h> | |
2 | #include <linux/clockchips.h> | |
3 | #include <linux/interrupt.h> | |
4 | #include <linux/sysdev.h> | |
5 | #include <linux/delay.h> | |
6 | #include <linux/errno.h> | |
7 | #include <linux/slab.h> | |
8 | #include <linux/hpet.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/cpu.h> | |
11 | #include <linux/pm.h> | |
12 | #include <linux/io.h> | |
13 | ||
14 | #include <asm/fixmap.h> | |
15 | #include <asm/i8253.h> | |
16 | #include <asm/hpet.h> | |
17 | ||
/* The main counter is used as a 32 bit wide clocksource */
#define HPET_MASK			CLOCKSOURCE_MASK(32)
#define HPET_SHIFT			22

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC			1000000L

/* Bits kept in struct hpet_dev::flags */
#define HPET_DEV_USED_BIT		2
#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID			0x8
#define HPET_DEV_FSB_CAP		0x1000
#define HPET_DEV_PERI_CAP		0x2000

#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
32 | ||
33 | /* | |
34 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | |
35 | */ | |
36 | unsigned long hpet_address; | |
37 | u8 hpet_blockid; /* OS timer block num */ | |
38 | u8 hpet_msi_disable; | |
39 | ||
40 | #ifdef CONFIG_PCI_MSI | |
41 | static unsigned long hpet_num_timers; | |
42 | #endif | |
43 | static void __iomem *hpet_virt_address; | |
44 | ||
45 | struct hpet_dev { | |
46 | struct clock_event_device evt; | |
47 | unsigned int num; | |
48 | int cpu; | |
49 | unsigned int irq; | |
50 | unsigned int flags; | |
51 | char name[10]; | |
52 | }; | |
53 | ||
54 | inline unsigned int hpet_readl(unsigned int a) | |
55 | { | |
56 | return readl(hpet_virt_address + a); | |
57 | } | |
58 | ||
59 | static inline void hpet_writel(unsigned int d, unsigned int a) | |
60 | { | |
61 | writel(d, hpet_virt_address + a); | |
62 | } | |
63 | ||
64 | #ifdef CONFIG_X86_64 | |
65 | #include <asm/pgtable.h> | |
66 | #endif | |
67 | ||
68 | static inline void hpet_set_mapping(void) | |
69 | { | |
70 | hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE); | |
71 | #ifdef CONFIG_X86_64 | |
72 | __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE); | |
73 | #endif | |
74 | } | |
75 | ||
76 | static inline void hpet_clear_mapping(void) | |
77 | { | |
78 | iounmap(hpet_virt_address); | |
79 | hpet_virt_address = NULL; | |
80 | } | |
81 | ||
82 | /* | |
83 | * HPET command line enable / disable | |
84 | */ | |
85 | static int boot_hpet_disable; | |
86 | int hpet_force_user; | |
87 | static int hpet_verbose; | |
88 | ||
89 | static int __init hpet_setup(char *str) | |
90 | { | |
91 | if (str) { | |
92 | if (!strncmp("disable", str, 7)) | |
93 | boot_hpet_disable = 1; | |
94 | if (!strncmp("force", str, 5)) | |
95 | hpet_force_user = 1; | |
96 | if (!strncmp("verbose", str, 7)) | |
97 | hpet_verbose = 1; | |
98 | } | |
99 | return 1; | |
100 | } | |
101 | __setup("hpet=", hpet_setup); | |
102 | ||
103 | static int __init disable_hpet(char *str) | |
104 | { | |
105 | boot_hpet_disable = 1; | |
106 | return 1; | |
107 | } | |
108 | __setup("nohpet", disable_hpet); | |
109 | ||
110 | static inline int is_hpet_capable(void) | |
111 | { | |
112 | return !boot_hpet_disable && hpet_address; | |
113 | } | |
114 | ||
115 | /* | |
116 | * HPET timer interrupt enable / disable | |
117 | */ | |
118 | static int hpet_legacy_int_enabled; | |
119 | ||
120 | /** | |
121 | * is_hpet_enabled - check whether the hpet timer interrupt is enabled | |
122 | */ | |
123 | int is_hpet_enabled(void) | |
124 | { | |
125 | return is_hpet_capable() && hpet_legacy_int_enabled; | |
126 | } | |
127 | EXPORT_SYMBOL_GPL(is_hpet_enabled); | |
128 | ||
129 | static void _hpet_print_config(const char *function, int line) | |
130 | { | |
131 | u32 i, timers, l, h; | |
132 | printk(KERN_INFO "hpet: %s(%d):\n", function, line); | |
133 | l = hpet_readl(HPET_ID); | |
134 | h = hpet_readl(HPET_PERIOD); | |
135 | timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; | |
136 | printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h); | |
137 | l = hpet_readl(HPET_CFG); | |
138 | h = hpet_readl(HPET_STATUS); | |
139 | printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h); | |
140 | l = hpet_readl(HPET_COUNTER); | |
141 | h = hpet_readl(HPET_COUNTER+4); | |
142 | printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); | |
143 | ||
144 | for (i = 0; i < timers; i++) { | |
145 | l = hpet_readl(HPET_Tn_CFG(i)); | |
146 | h = hpet_readl(HPET_Tn_CFG(i)+4); | |
147 | printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", | |
148 | i, l, h); | |
149 | l = hpet_readl(HPET_Tn_CMP(i)); | |
150 | h = hpet_readl(HPET_Tn_CMP(i)+4); | |
151 | printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", | |
152 | i, l, h); | |
153 | l = hpet_readl(HPET_Tn_ROUTE(i)); | |
154 | h = hpet_readl(HPET_Tn_ROUTE(i)+4); | |
155 | printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", | |
156 | i, l, h); | |
157 | } | |
158 | } | |
159 | ||
160 | #define hpet_print_config() \ | |
161 | do { \ | |
162 | if (hpet_verbose) \ | |
163 | _hpet_print_config(__FUNCTION__, __LINE__); \ | |
164 | } while (0) | |
165 | ||
/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

/* Hand all comparators not claimed by the kernel to the /dev/hpet driver. */
static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++)
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);
}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif
214 | ||
215 | /* | |
216 | * Common hpet info | |
217 | */ | |
218 | static unsigned long hpet_period; | |
219 | ||
220 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | |
221 | struct clock_event_device *evt); | |
222 | static int hpet_legacy_next_event(unsigned long delta, | |
223 | struct clock_event_device *evt); | |
224 | ||
225 | /* | |
226 | * The hpet clock event device | |
227 | */ | |
228 | static struct clock_event_device hpet_clockevent = { | |
229 | .name = "hpet", | |
230 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | |
231 | .set_mode = hpet_legacy_set_mode, | |
232 | .set_next_event = hpet_legacy_next_event, | |
233 | .shift = 32, | |
234 | .irq = 0, | |
235 | .rating = 50, | |
236 | }; | |
237 | ||
238 | static void hpet_stop_counter(void) | |
239 | { | |
240 | unsigned long cfg = hpet_readl(HPET_CFG); | |
241 | cfg &= ~HPET_CFG_ENABLE; | |
242 | hpet_writel(cfg, HPET_CFG); | |
243 | } | |
244 | ||
245 | static void hpet_reset_counter(void) | |
246 | { | |
247 | hpet_writel(0, HPET_COUNTER); | |
248 | hpet_writel(0, HPET_COUNTER + 4); | |
249 | } | |
250 | ||
251 | static void hpet_start_counter(void) | |
252 | { | |
253 | unsigned int cfg = hpet_readl(HPET_CFG); | |
254 | cfg |= HPET_CFG_ENABLE; | |
255 | hpet_writel(cfg, HPET_CFG); | |
256 | } | |
257 | ||
258 | static void hpet_restart_counter(void) | |
259 | { | |
260 | hpet_stop_counter(); | |
261 | hpet_reset_counter(); | |
262 | hpet_start_counter(); | |
263 | } | |
264 | ||
265 | static void hpet_resume_device(void) | |
266 | { | |
267 | force_hpet_resume(); | |
268 | } | |
269 | ||
270 | static void hpet_resume_counter(struct clocksource *cs) | |
271 | { | |
272 | hpet_resume_device(); | |
273 | hpet_restart_counter(); | |
274 | } | |
275 | ||
276 | static void hpet_enable_legacy_int(void) | |
277 | { | |
278 | unsigned int cfg = hpet_readl(HPET_CFG); | |
279 | ||
280 | cfg |= HPET_CFG_LEGACY; | |
281 | hpet_writel(cfg, HPET_CFG); | |
282 | hpet_legacy_int_enabled = 1; | |
283 | } | |
284 | ||
285 | static void hpet_legacy_clockevent_register(void) | |
286 | { | |
287 | /* Start HPET legacy interrupts */ | |
288 | hpet_enable_legacy_int(); | |
289 | ||
290 | /* | |
291 | * The mult factor is defined as (include/linux/clockchips.h) | |
292 | * mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h) | |
293 | * hpet_period is in units of femtoseconds (per cycle), so | |
294 | * mult/2^shift = cyc/ns = 10^6/hpet_period | |
295 | * mult = (10^6 * 2^shift)/hpet_period | |
296 | * mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period | |
297 | */ | |
298 | hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC, | |
299 | hpet_period, hpet_clockevent.shift); | |
300 | /* Calculate the min / max delta */ | |
301 | hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, | |
302 | &hpet_clockevent); | |
303 | /* 5 usec minimum reprogramming delta. */ | |
304 | hpet_clockevent.min_delta_ns = 5000; | |
305 | ||
306 | /* | |
307 | * Start hpet with the boot cpu mask and make it | |
308 | * global after the IO_APIC has been initialized. | |
309 | */ | |
310 | hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); | |
311 | clockevents_register_device(&hpet_clockevent); | |
312 | global_clock_event = &hpet_clockevent; | |
313 | printk(KERN_DEBUG "hpet clockevent registered\n"); | |
314 | } | |
315 | ||
316 | static int hpet_setup_msi_irq(unsigned int irq); | |
317 | ||
318 | static void hpet_set_mode(enum clock_event_mode mode, | |
319 | struct clock_event_device *evt, int timer) | |
320 | { | |
321 | unsigned int cfg, cmp, now; | |
322 | uint64_t delta; | |
323 | ||
324 | switch (mode) { | |
325 | case CLOCK_EVT_MODE_PERIODIC: | |
326 | hpet_stop_counter(); | |
327 | delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; | |
328 | delta >>= evt->shift; | |
329 | now = hpet_readl(HPET_COUNTER); | |
330 | cmp = now + (unsigned int) delta; | |
331 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | |
332 | /* Make sure we use edge triggered interrupts */ | |
333 | cfg &= ~HPET_TN_LEVEL; | |
334 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | | |
335 | HPET_TN_SETVAL | HPET_TN_32BIT; | |
336 | hpet_writel(cfg, HPET_Tn_CFG(timer)); | |
337 | hpet_writel(cmp, HPET_Tn_CMP(timer)); | |
338 | udelay(1); | |
339 | /* | |
340 | * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL | |
341 | * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL | |
342 | * bit is automatically cleared after the first write. | |
343 | * (See AMD-8111 HyperTransport I/O Hub Data Sheet, | |
344 | * Publication # 24674) | |
345 | */ | |
346 | hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer)); | |
347 | hpet_start_counter(); | |
348 | hpet_print_config(); | |
349 | break; | |
350 | ||
351 | case CLOCK_EVT_MODE_ONESHOT: | |
352 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | |
353 | cfg &= ~HPET_TN_PERIODIC; | |
354 | cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; | |
355 | hpet_writel(cfg, HPET_Tn_CFG(timer)); | |
356 | break; | |
357 | ||
358 | case CLOCK_EVT_MODE_UNUSED: | |
359 | case CLOCK_EVT_MODE_SHUTDOWN: | |
360 | cfg = hpet_readl(HPET_Tn_CFG(timer)); | |
361 | cfg &= ~HPET_TN_ENABLE; | |
362 | hpet_writel(cfg, HPET_Tn_CFG(timer)); | |
363 | break; | |
364 | ||
365 | case CLOCK_EVT_MODE_RESUME: | |
366 | if (timer == 0) { | |
367 | hpet_enable_legacy_int(); | |
368 | } else { | |
369 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | |
370 | hpet_setup_msi_irq(hdev->irq); | |
371 | disable_irq(hdev->irq); | |
372 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); | |
373 | enable_irq(hdev->irq); | |
374 | } | |
375 | hpet_print_config(); | |
376 | break; | |
377 | } | |
378 | } | |
379 | ||
380 | static int hpet_next_event(unsigned long delta, | |
381 | struct clock_event_device *evt, int timer) | |
382 | { | |
383 | u32 cnt; | |
384 | ||
385 | cnt = hpet_readl(HPET_COUNTER); | |
386 | cnt += (u32) delta; | |
387 | hpet_writel(cnt, HPET_Tn_CMP(timer)); | |
388 | ||
389 | /* | |
390 | * We need to read back the CMP register on certain HPET | |
391 | * implementations (ATI chipsets) which seem to delay the | |
392 | * transfer of the compare register into the internal compare | |
393 | * logic. With small deltas this might actually be too late as | |
394 | * the counter could already be higher than the compare value | |
395 | * at that point and we would wait for the next hpet interrupt | |
396 | * forever. We found out that reading the CMP register back | |
397 | * forces the transfer so we can rely on the comparison with | |
398 | * the counter register below. If the read back from the | |
399 | * compare register does not match the value we programmed | |
400 | * then we might have a real hardware problem. We can not do | |
401 | * much about it here, but at least alert the user/admin with | |
402 | * a prominent warning. | |
403 | */ | |
404 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, | |
405 | KERN_WARNING "hpet: compare register read back failed.\n"); | |
406 | ||
407 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | |
408 | } | |
409 | ||
410 | static void hpet_legacy_set_mode(enum clock_event_mode mode, | |
411 | struct clock_event_device *evt) | |
412 | { | |
413 | hpet_set_mode(mode, evt, 0); | |
414 | } | |
415 | ||
416 | static int hpet_legacy_next_event(unsigned long delta, | |
417 | struct clock_event_device *evt) | |
418 | { | |
419 | return hpet_next_event(delta, evt, 0); | |
420 | } | |
421 | ||
422 | /* | |
423 | * HPET MSI Support | |
424 | */ | |
425 | #ifdef CONFIG_PCI_MSI | |
426 | ||
427 | static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); | |
428 | static struct hpet_dev *hpet_devs; | |
429 | ||
430 | void hpet_msi_unmask(unsigned int irq) | |
431 | { | |
432 | struct hpet_dev *hdev = get_irq_data(irq); | |
433 | unsigned int cfg; | |
434 | ||
435 | /* unmask it */ | |
436 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | |
437 | cfg |= HPET_TN_FSB; | |
438 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | |
439 | } | |
440 | ||
441 | void hpet_msi_mask(unsigned int irq) | |
442 | { | |
443 | unsigned int cfg; | |
444 | struct hpet_dev *hdev = get_irq_data(irq); | |
445 | ||
446 | /* mask it */ | |
447 | cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); | |
448 | cfg &= ~HPET_TN_FSB; | |
449 | hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); | |
450 | } | |
451 | ||
452 | void hpet_msi_write(unsigned int irq, struct msi_msg *msg) | |
453 | { | |
454 | struct hpet_dev *hdev = get_irq_data(irq); | |
455 | ||
456 | hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); | |
457 | hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); | |
458 | } | |
459 | ||
460 | void hpet_msi_read(unsigned int irq, struct msi_msg *msg) | |
461 | { | |
462 | struct hpet_dev *hdev = get_irq_data(irq); | |
463 | ||
464 | msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); | |
465 | msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); | |
466 | msg->address_hi = 0; | |
467 | } | |
468 | ||
469 | static void hpet_msi_set_mode(enum clock_event_mode mode, | |
470 | struct clock_event_device *evt) | |
471 | { | |
472 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | |
473 | hpet_set_mode(mode, evt, hdev->num); | |
474 | } | |
475 | ||
476 | static int hpet_msi_next_event(unsigned long delta, | |
477 | struct clock_event_device *evt) | |
478 | { | |
479 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | |
480 | return hpet_next_event(delta, evt, hdev->num); | |
481 | } | |
482 | ||
483 | static int hpet_setup_msi_irq(unsigned int irq) | |
484 | { | |
485 | if (arch_setup_hpet_msi(irq, hpet_blockid)) { | |
486 | destroy_irq(irq); | |
487 | return -EINVAL; | |
488 | } | |
489 | return 0; | |
490 | } | |
491 | ||
492 | static int hpet_assign_irq(struct hpet_dev *dev) | |
493 | { | |
494 | unsigned int irq; | |
495 | ||
496 | irq = create_irq(); | |
497 | if (!irq) | |
498 | return -EINVAL; | |
499 | ||
500 | set_irq_data(irq, dev); | |
501 | ||
502 | if (hpet_setup_msi_irq(irq)) | |
503 | return -EINVAL; | |
504 | ||
505 | dev->irq = irq; | |
506 | return 0; | |
507 | } | |
508 | ||
509 | static irqreturn_t hpet_interrupt_handler(int irq, void *data) | |
510 | { | |
511 | struct hpet_dev *dev = (struct hpet_dev *)data; | |
512 | struct clock_event_device *hevt = &dev->evt; | |
513 | ||
514 | if (!hevt->event_handler) { | |
515 | printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n", | |
516 | dev->num); | |
517 | return IRQ_HANDLED; | |
518 | } | |
519 | ||
520 | hevt->event_handler(hevt); | |
521 | return IRQ_HANDLED; | |
522 | } | |
523 | ||
524 | static int hpet_setup_irq(struct hpet_dev *dev) | |
525 | { | |
526 | ||
527 | if (request_irq(dev->irq, hpet_interrupt_handler, | |
528 | IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, | |
529 | dev->name, dev)) | |
530 | return -1; | |
531 | ||
532 | disable_irq(dev->irq); | |
533 | irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); | |
534 | enable_irq(dev->irq); | |
535 | ||
536 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", | |
537 | dev->name, dev->irq); | |
538 | ||
539 | return 0; | |
540 | } | |
541 | ||
542 | /* This should be called in specific @cpu */ | |
543 | static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | |
544 | { | |
545 | struct clock_event_device *evt = &hdev->evt; | |
546 | uint64_t hpet_freq; | |
547 | ||
548 | WARN_ON(cpu != smp_processor_id()); | |
549 | if (!(hdev->flags & HPET_DEV_VALID)) | |
550 | return; | |
551 | ||
552 | if (hpet_setup_msi_irq(hdev->irq)) | |
553 | return; | |
554 | ||
555 | hdev->cpu = cpu; | |
556 | per_cpu(cpu_hpet_dev, cpu) = hdev; | |
557 | evt->name = hdev->name; | |
558 | hpet_setup_irq(hdev); | |
559 | evt->irq = hdev->irq; | |
560 | ||
561 | evt->rating = 110; | |
562 | evt->features = CLOCK_EVT_FEAT_ONESHOT; | |
563 | if (hdev->flags & HPET_DEV_PERI_CAP) | |
564 | evt->features |= CLOCK_EVT_FEAT_PERIODIC; | |
565 | ||
566 | evt->set_mode = hpet_msi_set_mode; | |
567 | evt->set_next_event = hpet_msi_next_event; | |
568 | evt->shift = 32; | |
569 | ||
570 | /* | |
571 | * The period is a femto seconds value. We need to calculate the | |
572 | * scaled math multiplication factor for nanosecond to hpet tick | |
573 | * conversion. | |
574 | */ | |
575 | hpet_freq = 1000000000000000ULL; | |
576 | do_div(hpet_freq, hpet_period); | |
577 | evt->mult = div_sc((unsigned long) hpet_freq, | |
578 | NSEC_PER_SEC, evt->shift); | |
579 | /* Calculate the max delta */ | |
580 | evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt); | |
581 | /* 5 usec minimum reprogramming delta. */ | |
582 | evt->min_delta_ns = 5000; | |
583 | ||
584 | evt->cpumask = cpumask_of(hdev->cpu); | |
585 | clockevents_register_device(evt); | |
586 | } | |
587 | ||
588 | #ifdef CONFIG_HPET | |
589 | /* Reserve at least one timer for userspace (/dev/hpet) */ | |
590 | #define RESERVE_TIMERS 1 | |
591 | #else | |
592 | #define RESERVE_TIMERS 0 | |
593 | #endif | |
594 | ||
595 | static void hpet_msi_capability_lookup(unsigned int start_timer) | |
596 | { | |
597 | unsigned int id; | |
598 | unsigned int num_timers; | |
599 | unsigned int num_timers_used = 0; | |
600 | int i; | |
601 | ||
602 | if (hpet_msi_disable) | |
603 | return; | |
604 | ||
605 | if (boot_cpu_has(X86_FEATURE_ARAT)) | |
606 | return; | |
607 | id = hpet_readl(HPET_ID); | |
608 | ||
609 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); | |
610 | num_timers++; /* Value read out starts from 0 */ | |
611 | hpet_print_config(); | |
612 | ||
613 | hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); | |
614 | if (!hpet_devs) | |
615 | return; | |
616 | ||
617 | hpet_num_timers = num_timers; | |
618 | ||
619 | for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { | |
620 | struct hpet_dev *hdev = &hpet_devs[num_timers_used]; | |
621 | unsigned int cfg = hpet_readl(HPET_Tn_CFG(i)); | |
622 | ||
623 | /* Only consider HPET timer with MSI support */ | |
624 | if (!(cfg & HPET_TN_FSB_CAP)) | |
625 | continue; | |
626 | ||
627 | hdev->flags = 0; | |
628 | if (cfg & HPET_TN_PERIODIC_CAP) | |
629 | hdev->flags |= HPET_DEV_PERI_CAP; | |
630 | hdev->num = i; | |
631 | ||
632 | sprintf(hdev->name, "hpet%d", i); | |
633 | if (hpet_assign_irq(hdev)) | |
634 | continue; | |
635 | ||
636 | hdev->flags |= HPET_DEV_FSB_CAP; | |
637 | hdev->flags |= HPET_DEV_VALID; | |
638 | num_timers_used++; | |
639 | if (num_timers_used == num_possible_cpus()) | |
640 | break; | |
641 | } | |
642 | ||
643 | printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n", | |
644 | num_timers, num_timers_used); | |
645 | } | |
646 | ||
647 | #ifdef CONFIG_HPET | |
648 | static void hpet_reserve_msi_timers(struct hpet_data *hd) | |
649 | { | |
650 | int i; | |
651 | ||
652 | if (!hpet_devs) | |
653 | return; | |
654 | ||
655 | for (i = 0; i < hpet_num_timers; i++) { | |
656 | struct hpet_dev *hdev = &hpet_devs[i]; | |
657 | ||
658 | if (!(hdev->flags & HPET_DEV_VALID)) | |
659 | continue; | |
660 | ||
661 | hd->hd_irq[hdev->num] = hdev->irq; | |
662 | hpet_reserve_timer(hd, hdev->num); | |
663 | } | |
664 | } | |
665 | #endif | |
666 | ||
667 | static struct hpet_dev *hpet_get_unused_timer(void) | |
668 | { | |
669 | int i; | |
670 | ||
671 | if (!hpet_devs) | |
672 | return NULL; | |
673 | ||
674 | for (i = 0; i < hpet_num_timers; i++) { | |
675 | struct hpet_dev *hdev = &hpet_devs[i]; | |
676 | ||
677 | if (!(hdev->flags & HPET_DEV_VALID)) | |
678 | continue; | |
679 | if (test_and_set_bit(HPET_DEV_USED_BIT, | |
680 | (unsigned long *)&hdev->flags)) | |
681 | continue; | |
682 | return hdev; | |
683 | } | |
684 | return NULL; | |
685 | } | |
686 | ||
687 | struct hpet_work_struct { | |
688 | struct delayed_work work; | |
689 | struct completion complete; | |
690 | }; | |
691 | ||
692 | static void hpet_work(struct work_struct *w) | |
693 | { | |
694 | struct hpet_dev *hdev; | |
695 | int cpu = smp_processor_id(); | |
696 | struct hpet_work_struct *hpet_work; | |
697 | ||
698 | hpet_work = container_of(w, struct hpet_work_struct, work.work); | |
699 | ||
700 | hdev = hpet_get_unused_timer(); | |
701 | if (hdev) | |
702 | init_one_hpet_msi_clockevent(hdev, cpu); | |
703 | ||
704 | complete(&hpet_work->complete); | |
705 | } | |
706 | ||
707 | static int hpet_cpuhp_notify(struct notifier_block *n, | |
708 | unsigned long action, void *hcpu) | |
709 | { | |
710 | unsigned long cpu = (unsigned long)hcpu; | |
711 | struct hpet_work_struct work; | |
712 | struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); | |
713 | ||
714 | switch (action & 0xf) { | |
715 | case CPU_ONLINE: | |
716 | INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); | |
717 | init_completion(&work.complete); | |
718 | /* FIXME: add schedule_work_on() */ | |
719 | schedule_delayed_work_on(cpu, &work.work, 0); | |
720 | wait_for_completion(&work.complete); | |
721 | destroy_timer_on_stack(&work.work.timer); | |
722 | break; | |
723 | case CPU_DEAD: | |
724 | if (hdev) { | |
725 | free_irq(hdev->irq, hdev); | |
726 | hdev->flags &= ~HPET_DEV_USED; | |
727 | per_cpu(cpu_hpet_dev, cpu) = NULL; | |
728 | } | |
729 | break; | |
730 | } | |
731 | return NOTIFY_OK; | |
732 | } | |
733 | #else | |
734 | ||
735 | static int hpet_setup_msi_irq(unsigned int irq) | |
736 | { | |
737 | return 0; | |
738 | } | |
739 | static void hpet_msi_capability_lookup(unsigned int start_timer) | |
740 | { | |
741 | return; | |
742 | } | |
743 | ||
744 | #ifdef CONFIG_HPET | |
745 | static void hpet_reserve_msi_timers(struct hpet_data *hd) | |
746 | { | |
747 | return; | |
748 | } | |
749 | #endif | |
750 | ||
751 | static int hpet_cpuhp_notify(struct notifier_block *n, | |
752 | unsigned long action, void *hcpu) | |
753 | { | |
754 | return NOTIFY_OK; | |
755 | } | |
756 | ||
757 | #endif | |
758 | ||
759 | /* | |
760 | * Clock source related code | |
761 | */ | |
762 | static cycle_t read_hpet(struct clocksource *cs) | |
763 | { | |
764 | return (cycle_t)hpet_readl(HPET_COUNTER); | |
765 | } | |
766 | ||
767 | #ifdef CONFIG_X86_64 | |
768 | static cycle_t __vsyscall_fn vread_hpet(void) | |
769 | { | |
770 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); | |
771 | } | |
772 | #endif | |
773 | ||
774 | static struct clocksource clocksource_hpet = { | |
775 | .name = "hpet", | |
776 | .rating = 250, | |
777 | .read = read_hpet, | |
778 | .mask = HPET_MASK, | |
779 | .shift = HPET_SHIFT, | |
780 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | |
781 | .resume = hpet_resume_counter, | |
782 | #ifdef CONFIG_X86_64 | |
783 | .vread = vread_hpet, | |
784 | #endif | |
785 | }; | |
786 | ||
787 | static int hpet_clocksource_register(void) | |
788 | { | |
789 | u64 start, now; | |
790 | cycle_t t1; | |
791 | ||
792 | /* Start the counter */ | |
793 | hpet_restart_counter(); | |
794 | ||
795 | /* Verify whether hpet counter works */ | |
796 | t1 = hpet_readl(HPET_COUNTER); | |
797 | rdtscll(start); | |
798 | ||
799 | /* | |
800 | * We don't know the TSC frequency yet, but waiting for | |
801 | * 200000 TSC cycles is safe: | |
802 | * 4 GHz == 50us | |
803 | * 1 GHz == 200us | |
804 | */ | |
805 | do { | |
806 | rep_nop(); | |
807 | rdtscll(now); | |
808 | } while ((now - start) < 200000UL); | |
809 | ||
810 | if (t1 == hpet_readl(HPET_COUNTER)) { | |
811 | printk(KERN_WARNING | |
812 | "HPET counter not counting. HPET disabled\n"); | |
813 | return -ENODEV; | |
814 | } | |
815 | ||
816 | /* | |
817 | * The definition of mult is (include/linux/clocksource.h) | |
818 | * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc | |
819 | * so we first need to convert hpet_period to ns/cyc units: | |
820 | * mult/2^shift = ns/cyc = hpet_period/10^6 | |
821 | * mult = (hpet_period * 2^shift)/10^6 | |
822 | * mult = (hpet_period << shift)/FSEC_PER_NSEC | |
823 | */ | |
824 | clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT); | |
825 | ||
826 | clocksource_register(&clocksource_hpet); | |
827 | ||
828 | return 0; | |
829 | } | |
830 | ||
831 | /** | |
832 | * hpet_enable - Try to setup the HPET timer. Returns 1 on success. | |
833 | */ | |
834 | int __init hpet_enable(void) | |
835 | { | |
836 | unsigned int id; | |
837 | int i; | |
838 | ||
839 | if (!is_hpet_capable()) | |
840 | return 0; | |
841 | ||
842 | hpet_set_mapping(); | |
843 | ||
844 | /* | |
845 | * Read the period and check for a sane value: | |
846 | */ | |
847 | hpet_period = hpet_readl(HPET_PERIOD); | |
848 | ||
849 | /* | |
850 | * AMD SB700 based systems with spread spectrum enabled use a | |
851 | * SMM based HPET emulation to provide proper frequency | |
852 | * setting. The SMM code is initialized with the first HPET | |
853 | * register access and takes some time to complete. During | |
854 | * this time the config register reads 0xffffffff. We check | |
855 | * for max. 1000 loops whether the config register reads a non | |
856 | * 0xffffffff value to make sure that HPET is up and running | |
857 | * before we go further. A counting loop is safe, as the HPET | |
858 | * access takes thousands of CPU cycles. On non SB700 based | |
859 | * machines this check is only done once and has no side | |
860 | * effects. | |
861 | */ | |
862 | for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) { | |
863 | if (i == 1000) { | |
864 | printk(KERN_WARNING | |
865 | "HPET config register value = 0xFFFFFFFF. " | |
866 | "Disabling HPET\n"); | |
867 | goto out_nohpet; | |
868 | } | |
869 | } | |
870 | ||
871 | if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) | |
872 | goto out_nohpet; | |
873 | ||
874 | /* | |
875 | * Read the HPET ID register to retrieve the IRQ routing | |
876 | * information and the number of channels | |
877 | */ | |
878 | id = hpet_readl(HPET_ID); | |
879 | hpet_print_config(); | |
880 | ||
881 | #ifdef CONFIG_HPET_EMULATE_RTC | |
882 | /* | |
883 | * The legacy routing mode needs at least two channels, tick timer | |
884 | * and the rtc emulation channel. | |
885 | */ | |
886 | if (!(id & HPET_ID_NUMBER)) | |
887 | goto out_nohpet; | |
888 | #endif | |
889 | ||
890 | if (hpet_clocksource_register()) | |
891 | goto out_nohpet; | |
892 | ||
893 | if (id & HPET_ID_LEGSUP) { | |
894 | hpet_legacy_clockevent_register(); | |
895 | return 1; | |
896 | } | |
897 | return 0; | |
898 | ||
899 | out_nohpet: | |
900 | hpet_clear_mapping(); | |
901 | hpet_address = 0; | |
902 | return 0; | |
903 | } | |
904 | ||
905 | /* | |
906 | * Needs to be late, as the reserve_timer code calls kalloc ! | |
907 | * | |
908 | * Not a problem on i386 as hpet_enable is called from late_time_init, | |
909 | * but on x86_64 it is necessary ! | |
910 | */ | |
911 | static __init int hpet_late_init(void) | |
912 | { | |
913 | int cpu; | |
914 | ||
915 | if (boot_hpet_disable) | |
916 | return -ENODEV; | |
917 | ||
918 | if (!hpet_address) { | |
919 | if (!force_hpet_address) | |
920 | return -ENODEV; | |
921 | ||
922 | hpet_address = force_hpet_address; | |
923 | hpet_enable(); | |
924 | } | |
925 | ||
926 | if (!hpet_virt_address) | |
927 | return -ENODEV; | |
928 | ||
929 | if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP) | |
930 | hpet_msi_capability_lookup(2); | |
931 | else | |
932 | hpet_msi_capability_lookup(0); | |
933 | ||
934 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | |
935 | hpet_print_config(); | |
936 | ||
937 | if (hpet_msi_disable) | |
938 | return 0; | |
939 | ||
940 | if (boot_cpu_has(X86_FEATURE_ARAT)) | |
941 | return 0; | |
942 | ||
943 | for_each_online_cpu(cpu) { | |
944 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); | |
945 | } | |
946 | ||
947 | /* This notifier should be called after workqueue is ready */ | |
948 | hotcpu_notifier(hpet_cpuhp_notify, -20); | |
949 | ||
950 | return 0; | |
951 | } | |
952 | fs_initcall(hpet_late_init); | |
953 | ||
954 | void hpet_disable(void) | |
955 | { | |
956 | if (is_hpet_capable()) { | |
957 | unsigned int cfg = hpet_readl(HPET_CFG); | |
958 | ||
959 | if (hpet_legacy_int_enabled) { | |
960 | cfg &= ~HPET_CFG_LEGACY; | |
961 | hpet_legacy_int_enabled = 0; | |
962 | } | |
963 | cfg &= ~HPET_CFG_ENABLE; | |
964 | hpet_writel(cfg, HPET_CFG); | |
965 | } | |
966 | } | |
967 | ||
968 | #ifdef CONFIG_HPET_EMULATE_RTC | |
969 | ||
970 | /* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET | |
971 | * is enabled, we support RTC interrupt functionality in software. | |
972 | * RTC has 3 kinds of interrupts: | |
973 | * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock | |
974 | * is updated | |
975 | * 2) Alarm Interrupt - generate an interrupt at a specific time of day | |
976 | * 3) Periodic Interrupt - generate periodic interrupt, with frequencies | |
977 | * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2) | |
978 | * (1) and (2) above are implemented using polling at a frequency of | |
979 | * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt | |
980 | * overhead. (DEFAULT_RTC_INT_FREQ) | |
981 | * For (3), we use interrupts at 64Hz or user specified periodic | |
982 | * frequency, whichever is higher. | |
983 | */ | |
984 | #include <linux/mc146818rtc.h> | |
985 | #include <linux/rtc.h> | |
986 | #include <asm/rtc.h> | |
987 | ||
988 | #define DEFAULT_RTC_INT_FREQ 64 | |
989 | #define DEFAULT_RTC_SHIFT 6 | |
990 | #define RTC_NUM_INTS 1 | |
991 | ||
992 | static unsigned long hpet_rtc_flags; | |
993 | static int hpet_prev_update_sec; | |
994 | static struct rtc_time hpet_alarm_time; | |
995 | static unsigned long hpet_pie_count; | |
996 | static u32 hpet_t1_cmp; | |
997 | static u32 hpet_default_delta; | |
998 | static u32 hpet_pie_delta; | |
999 | static unsigned long hpet_pie_limit; | |
1000 | ||
1001 | static rtc_irq_handler irq_handler; | |
1002 | ||
1003 | /* | |
1004 | * Check that the hpet counter c1 is ahead of the c2 | |
1005 | */ | |
1006 | static inline int hpet_cnt_ahead(u32 c1, u32 c2) | |
1007 | { | |
1008 | return (s32)(c2 - c1) < 0; | |
1009 | } | |
1010 | ||
1011 | /* | |
1012 | * Registers a IRQ handler. | |
1013 | */ | |
1014 | int hpet_register_irq_handler(rtc_irq_handler handler) | |
1015 | { | |
1016 | if (!is_hpet_enabled()) | |
1017 | return -ENODEV; | |
1018 | if (irq_handler) | |
1019 | return -EBUSY; | |
1020 | ||
1021 | irq_handler = handler; | |
1022 | ||
1023 | return 0; | |
1024 | } | |
1025 | EXPORT_SYMBOL_GPL(hpet_register_irq_handler); | |
1026 | ||
1027 | /* | |
1028 | * Deregisters the IRQ handler registered with hpet_register_irq_handler() | |
1029 | * and does cleanup. | |
1030 | */ | |
1031 | void hpet_unregister_irq_handler(rtc_irq_handler handler) | |
1032 | { | |
1033 | if (!is_hpet_enabled()) | |
1034 | return; | |
1035 | ||
1036 | irq_handler = NULL; | |
1037 | hpet_rtc_flags = 0; | |
1038 | } | |
1039 | EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler); | |
1040 | ||
/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 *
 * Returns 0 when the HPET is not usable, 1 after timer 1 has been
 * armed for the first emulated tick.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	/* Lazily compute the counter delta for the default 64 Hz rate. */
	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	/*
	 * Only pure high-rate PIE mode (hpet_pie_limit == 0) runs at the
	 * user delta; every other mode polls at the 64 Hz default.
	 */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Interrupts off so the counter read and the comparator write
	 * stay close together.
	 */
	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;	/* cached for hpet_rtc_timer_reinit() */

	/* One shot, 32-bit mode — see the comment above the function. */
	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1084 | ||
1085 | /* | |
1086 | * The functions below are called from rtc driver. | |
1087 | * Return 0 if HPET is not being used. | |
1088 | * Otherwise do the necessary changes and return 1. | |
1089 | */ | |
1090 | int hpet_mask_rtc_irq_bit(unsigned long bit_mask) | |
1091 | { | |
1092 | if (!is_hpet_enabled()) | |
1093 | return 0; | |
1094 | ||
1095 | hpet_rtc_flags &= ~bit_mask; | |
1096 | return 1; | |
1097 | } | |
1098 | EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); | |
1099 | ||
1100 | int hpet_set_rtc_irq_bit(unsigned long bit_mask) | |
1101 | { | |
1102 | unsigned long oldbits = hpet_rtc_flags; | |
1103 | ||
1104 | if (!is_hpet_enabled()) | |
1105 | return 0; | |
1106 | ||
1107 | hpet_rtc_flags |= bit_mask; | |
1108 | ||
1109 | if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) | |
1110 | hpet_prev_update_sec = -1; | |
1111 | ||
1112 | if (!oldbits) | |
1113 | hpet_rtc_timer_init(); | |
1114 | ||
1115 | return 1; | |
1116 | } | |
1117 | EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit); | |
1118 | ||
1119 | int hpet_set_alarm_time(unsigned char hrs, unsigned char min, | |
1120 | unsigned char sec) | |
1121 | { | |
1122 | if (!is_hpet_enabled()) | |
1123 | return 0; | |
1124 | ||
1125 | hpet_alarm_time.tm_hour = hrs; | |
1126 | hpet_alarm_time.tm_min = min; | |
1127 | hpet_alarm_time.tm_sec = sec; | |
1128 | ||
1129 | return 1; | |
1130 | } | |
1131 | EXPORT_SYMBOL_GPL(hpet_set_alarm_time); | |
1132 | ||
1133 | int hpet_set_periodic_freq(unsigned long freq) | |
1134 | { | |
1135 | uint64_t clc; | |
1136 | ||
1137 | if (!is_hpet_enabled()) | |
1138 | return 0; | |
1139 | ||
1140 | if (freq <= DEFAULT_RTC_INT_FREQ) | |
1141 | hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq; | |
1142 | else { | |
1143 | clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; | |
1144 | do_div(clc, freq); | |
1145 | clc >>= hpet_clockevent.shift; | |
1146 | hpet_pie_delta = clc; | |
1147 | } | |
1148 | return 1; | |
1149 | } | |
1150 | EXPORT_SYMBOL_GPL(hpet_set_periodic_freq); | |
1151 | ||
/*
 * Called by the rtc driver on a dropped interrupt; simply reports
 * whether the HPET-based RTC emulation is active (nonzero) so the
 * caller knows this path is in use.
 */
int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1157 | ||
/*
 * Re-arm timer 1 for the next emulated RTC tick; called from
 * hpet_rtc_interrupt(). If no RTC emulation modes remain active,
 * the timer is disabled instead.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, delta;
	int lost_ints = -1;

	/* Nothing left to emulate: stop timer 1 and bail out. */
	if (unlikely(!hpet_rtc_flags)) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	/* Same delta selection as hpet_rtc_timer_init(). */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count. Starting from -1, every extra loop pass means
	 * one whole interrupt period was missed.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		/* Account the missed periods to the PIE counter. */
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}
1193 | ||
/*
 * Interrupt handler for HPET timer 1, emulating the three RTC
 * interrupt sources in software: update (RTC_UF), periodic (RTC_PF)
 * and alarm (RTC_AF). The combined status word is passed to the
 * handler installed via hpet_register_irq_handler().
 * Always returns IRQ_HANDLED.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	/* Arm the next tick (or stop the timer) first. */
	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	/* Wall-clock time is only needed for update/alarm emulation. */
	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	/*
	 * Update event: the RTC seconds value changed. A sentinel of -1
	 * (set when UIE was enabled) suppresses the very first event.
	 */
	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	/* Periodic event: every hpet_pie_limit ticks (every tick when 0). */
	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	/* Alarm event: second-granularity match against the stored time. */
	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1232 | #endif |