x86: remove never used apic_mapped
[mirror_ubuntu-zesty-kernel.git] / arch/x86/kernel/apic_64.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/init.h>
18
19 #include <linux/mm.h>
20 #include <linux/delay.h>
21 #include <linux/bootmem.h>
22 #include <linux/interrupt.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/sysdev.h>
26 #include <linux/module.h>
27 #include <linux/ioport.h>
28
29 #include <asm/atomic.h>
30 #include <asm/smp.h>
31 #include <asm/mtrr.h>
32 #include <asm/mpspec.h>
33 #include <asm/pgalloc.h>
34 #include <asm/mach_apic.h>
35 #include <asm/nmi.h>
36 #include <asm/idle.h>
37 #include <asm/proto.h>
38 #include <asm/timex.h>
39 #include <asm/hpet.h>
40 #include <asm/apic.h>
41
42 int apic_verbosity;
43 int apic_runs_main_timer;
44 int apic_calibrate_pmtmr __initdata;
45
46 int disable_apic_timer __initdata;
47
48 /* Local APIC timer works in C2? */
49 int local_apic_timer_c2_ok;
50 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
51
52 static struct resource *ioapic_resources;
53 static struct resource lapic_resource = {
54 .name = "Local APIC",
55 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
56 };
57
58 /*
59 * cpu_mask that denotes the CPUs that need the timer interrupt coming in as
60 * IPIs in place of local APIC timers
61 */
62 static cpumask_t timer_interrupt_broadcast_ipi_mask;
63
64 /* Using APIC to generate smp_local_timer_interrupt? */
65 int using_apic_timer __read_mostly = 0;
66
67 static void apic_pm_activate(void);
68
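/*
 * Busy-wait until the ICR delivery-status bit (APIC_ICR_BUSY) clears,
 * i.e. until the previously issued IPI has been accepted.
 */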
69 void apic_wait_icr_idle(void)
70 {
71 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
72 cpu_relax();
73 }
74
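/*
 * Bounded variant of apic_wait_icr_idle(): poll the ICR busy bit for at
 * most 1000 * 100us (~100 ms) and return the final busy status (zero
 * once the ICR has gone idle) instead of spinning forever.
 */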
75 unsigned int safe_apic_wait_icr_idle(void)
76 {
77 unsigned int send_status;
78 int timeout;
79
80 timeout = 0;
81 do {
82 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
83 if (!send_status)
84 break;
85 udelay(100);
86 } while (timeout++ < 1000);
87
88 return send_status;
89 }
90
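/*
 * Program LVT0 for NMI delivery, unmasked, so NMIs reach this CPU via
 * LINT0. The unused dummy argument just keeps the signature usable as
 * an SMP function-call callback.
 */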
91 void enable_NMI_through_LVT0 (void * dummy)
92 {
93 unsigned int v;
94
95 /* unmask and set to NMI */
96 v = APIC_DM_NMI;
97 apic_write(APIC_LVT0, v);
98 }
99
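/*
 * Read the local APIC version register and return the index of the
 * highest LVT entry this APIC implements.
 */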
100 int get_maxlvt(void)
101 {
102 unsigned int v, maxlvt;
103
104 v = apic_read(APIC_LVR);
105 maxlvt = GET_APIC_MAXLVT(v);
106 return maxlvt;
107 }
108
109 /*
110 * 'What should we do if we get a hw irq event on an illegal vector?'
111 * Each architecture has to answer this itself.
112 */
113 void ack_bad_irq(unsigned int irq)
114 {
115 printk("unexpected IRQ trap at vector %02x\n", irq);
116 /*
117 * Currently unexpected vectors happen only on SMP and APIC.
118 * We _must_ ack these because every local APIC has only N
119 * irq slots per priority level, and a 'hanging, unacked' IRQ
120 * holds up an irq slot - in excessive cases (when multiple
121 * unexpected vectors occur) that might lock up the APIC
122 * completely.
123 * But don't ack when the APIC is disabled. -AK
124 */
125 if (!disable_apic)
126 ack_APIC_irq();
127 }
128
129 void clear_local_APIC(void)
130 {
131 int maxlvt;
132 unsigned int v;
133
134 maxlvt = get_maxlvt();
135
136 /*
137 * Masking an LVT entry can trigger a local APIC error
138 * if the vector is zero. Mask LVTERR first to prevent this.
139 */
140 if (maxlvt >= 3) {
141 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
142 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
143 }
144 /*
145 * Careful: we have to set only the mask bits first to deassert
146 * any level-triggered sources.
147 */
148 v = apic_read(APIC_LVTT);
149 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
150 v = apic_read(APIC_LVT0);
151 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
152 v = apic_read(APIC_LVT1);
153 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
154 if (maxlvt >= 4) {
155 v = apic_read(APIC_LVTPC);
156 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
157 }
158
159 /*
160 * Clean APIC state for other OSs:
161 */
162 apic_write(APIC_LVTT, APIC_LVT_MASKED);
163 apic_write(APIC_LVT0, APIC_LVT_MASKED);
164 apic_write(APIC_LVT1, APIC_LVT_MASKED);
165 if (maxlvt >= 3)
166 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
167 if (maxlvt >= 4)
168 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
169 apic_write(APIC_ESR, 0);
170 apic_read(APIC_ESR);
171 }
172
173 void disconnect_bsp_APIC(int virt_wire_setup)
174 {
175 /* Go back to Virtual Wire compatibility mode */
176 unsigned long value;
177
178 /* For the spurious interrupt use vector F, and enable it */
179 value = apic_read(APIC_SPIV);
180 value &= ~APIC_VECTOR_MASK;
181 value |= APIC_SPIV_APIC_ENABLED;
182 value |= 0xf;
183 apic_write(APIC_SPIV, value);
184
185 if (!virt_wire_setup) {
186 /* For LVT0 make it edge triggered, active high, external and enabled */
187 value = apic_read(APIC_LVT0);
188 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
189 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
190 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
191 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
192 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
193 apic_write(APIC_LVT0, value);
194 } else {
195 /* Disable LVT0 */
196 apic_write(APIC_LVT0, APIC_LVT_MASKED);
197 }
198
199 /* For LVT1 make it edge triggered, active high, nmi and enabled */
200 value = apic_read(APIC_LVT1);
201 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
202 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
203 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
204 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
205 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
206 apic_write(APIC_LVT1, value);
207 }
208
209 void disable_local_APIC(void)
210 {
211 unsigned int value;
212
213 clear_local_APIC();
214
215 /*
216 * Disable APIC (implies clearing of registers
217 * for 82489DX!).
218 */
219 value = apic_read(APIC_SPIV);
220 value &= ~APIC_SPIV_APIC_ENABLED;
221 apic_write(APIC_SPIV, value);
222 }
223
224 /*
225 * This is to verify that we're looking at a real local APIC.
226 * Check these against your board if the CPUs aren't getting
227 * started, for no apparent reason.
228 */
229 int __init verify_local_APIC(void)
230 {
231 unsigned int reg0, reg1;
232
233 /*
234 * The version register is read-only in a real APIC.
235 */
236 reg0 = apic_read(APIC_LVR);
237 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
238 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
239 reg1 = apic_read(APIC_LVR);
240 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
241
242 /*
243 * The two version reads above should print the same
244 * numbers. If the second one is different, then we
245 * poke at a non-APIC.
246 */
247 if (reg1 != reg0)
248 return 0;
249
250 /*
251 * Check if the version looks reasonable.
252 */
253 reg1 = GET_APIC_VERSION(reg0);
254 if (reg1 == 0x00 || reg1 == 0xff)
255 return 0;
256 reg1 = get_maxlvt();
257 if (reg1 < 0x02 || reg1 == 0xff)
258 return 0;
259
260 /*
261 * The ID register is read/write in a real APIC.
262 */
263 reg0 = apic_read(APIC_ID);
264 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
265 apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
266 reg1 = apic_read(APIC_ID);
267 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
268 apic_write(APIC_ID, reg0);
269 if (reg1 != (reg0 ^ APIC_ID_MASK))
270 return 0;
271
272 /*
273 * The next two are just to see if we have sane values.
274 * They're only really relevant if we're in Virtual Wire
275 * compatibility mode, but most boxes aren't anymore.
276 */
277 reg0 = apic_read(APIC_LVT0);
278 apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
279 reg1 = apic_read(APIC_LVT1);
280 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
281
282 return 1;
283 }
284
285 void __init sync_Arb_IDs(void)
286 {
287 /* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
288 unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
289 if (ver >= 0x14) /* P4 or higher */
290 return;
291
292 /*
293 * Wait for idle.
294 */
295 apic_wait_icr_idle();
296
297 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
298 apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
299 | APIC_DM_INIT);
300 }
301
302 /*
303 * An initial setup of the virtual wire mode.
304 */
305 void __init init_bsp_APIC(void)
306 {
307 unsigned int value;
308
309 /*
310 * Don't do the setup now if we have an SMP BIOS as the
311 * through-I/O-APIC virtual wire mode might be active.
312 */
313 if (smp_found_config || !cpu_has_apic)
314 return;
315
316 value = apic_read(APIC_LVR);
317
318 /*
319 * Do not trust the local APIC being empty at bootup.
320 */
321 clear_local_APIC();
322
323 /*
324 * Enable APIC.
325 */
326 value = apic_read(APIC_SPIV);
327 value &= ~APIC_VECTOR_MASK;
328 value |= APIC_SPIV_APIC_ENABLED;
329 value |= APIC_SPIV_FOCUS_DISABLED;
330 value |= SPURIOUS_APIC_VECTOR;
331 apic_write(APIC_SPIV, value);
332
333 /*
334 * Set up the virtual wire mode.
335 */
336 apic_write(APIC_LVT0, APIC_DM_EXTINT);
337 value = APIC_DM_NMI;
338 apic_write(APIC_LVT1, value);
339 }
340
341 void __cpuinit setup_local_APIC (void)
342 {
343 unsigned int value, maxlvt;
344 int i, j;
345
346 value = apic_read(APIC_LVR);
347
348 BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);
349
350 /*
351 * Double-check whether this APIC is really registered.
352 * This is meaningless in clustered apic mode, so we skip it.
353 */
354 if (!apic_id_registered())
355 BUG();
356
357 /*
358 * Intel recommends to set DFR, LDR and TPR before enabling
359 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
360 * document number 292116). So here it goes...
361 */
362 init_apic_ldr();
363
364 /*
365 * Set Task Priority to 'accept all'. We never change this
366 * later on.
367 */
368 value = apic_read(APIC_TASKPRI);
369 value &= ~APIC_TPRI_MASK;
370 apic_write(APIC_TASKPRI, value);
371
372 /*
373 * After a crash, we no longer service the interrupts and a pending
374 * interrupt from previous kernel might still have ISR bit set.
375 *
376 * Most probably by now the CPU has serviced that pending interrupt and
377 * it might not have done the ack_APIC_irq() because it thought the
378 * interrupt came from i8259 as ExtInt. The LAPIC did not get an EOI so it
379 * does not clear the ISR bit and the cpu thinks it has already serviced
380 * the interrupt. Hence a vector might get locked. It was noticed
381 * for the timer irq (vector 0x31). Issue an extra EOI to clear the ISR.
382 */
383 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
384 value = apic_read(APIC_ISR + i*0x10);
385 for (j = 31; j >= 0; j--) {
386 if (value & (1<<j))
387 ack_APIC_irq();
388 }
389 }
390
391 /*
392 * Now that we are all set up, enable the APIC
393 */
394 value = apic_read(APIC_SPIV);
395 value &= ~APIC_VECTOR_MASK;
396 /*
397 * Enable APIC
398 */
399 value |= APIC_SPIV_APIC_ENABLED;
400
401 /* We always use processor focus */
402
403 /*
404 * Set spurious IRQ vector
405 */
406 value |= SPURIOUS_APIC_VECTOR;
407 apic_write(APIC_SPIV, value);
408
409 /*
410 * Set up LVT0, LVT1:
411 *
412 * set up through-local-APIC on the BP's LINT0. This is not
413 * strictly necessary in pure symmetric-IO mode, but sometimes
414 * we delegate interrupts to the 8259A.
415 */
416 /*
417 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
418 */
419 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
420 if (!smp_processor_id() && !value) {
421 value = APIC_DM_EXTINT;
422 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id());
423 } else {
424 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
425 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
426 }
427 apic_write(APIC_LVT0, value);
428
429 /*
430 * only the BP should see the LINT1 NMI signal, obviously.
431 */
432 if (!smp_processor_id())
433 value = APIC_DM_NMI;
434 else
435 value = APIC_DM_NMI | APIC_LVT_MASKED;
436 apic_write(APIC_LVT1, value);
437
438 {
439 unsigned oldvalue;
440 maxlvt = get_maxlvt();
441 oldvalue = apic_read(APIC_ESR);
442 value = ERROR_APIC_VECTOR; /* enables sending errors */
443 apic_write(APIC_LVTERR, value);
444 /*
445 * spec says clear errors after enabling vector.
446 */
447 if (maxlvt > 3)
448 apic_write(APIC_ESR, 0);
449 value = apic_read(APIC_ESR);
450 if (value != oldvalue)
451 apic_printk(APIC_VERBOSE,
452 "ESR value after enabling vector: %08x, after %08x\n",
453 oldvalue, value);
454 }
455
456 nmi_watchdog_default();
457 setup_apic_nmi_watchdog(NULL);
458 apic_pm_activate();
459 }
460
461 #ifdef CONFIG_PM
462
463 static struct {
464 /* 'active' is true if the local APIC was enabled by us and
465 not the BIOS; this signifies that we are also responsible
466 for disabling it before entering apm/acpi suspend */
467 int active;
468 /* r/w apic fields */
469 unsigned int apic_id;
470 unsigned int apic_taskpri;
471 unsigned int apic_ldr;
472 unsigned int apic_dfr;
473 unsigned int apic_spiv;
474 unsigned int apic_lvtt;
475 unsigned int apic_lvtpc;
476 unsigned int apic_lvt0;
477 unsigned int apic_lvt1;
478 unsigned int apic_lvterr;
479 unsigned int apic_tmict;
480 unsigned int apic_tdcr;
481 unsigned int apic_thmr;
482 } apic_pm_state;
483
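/*
 * Save the writable local APIC registers and disable the APIC before
 * suspend, but only if we enabled it ourselves (apic_pm_state.active).
 */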
484 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
485 {
486 unsigned long flags;
487 int maxlvt;
488
489 if (!apic_pm_state.active)
490 return 0;
491
492 maxlvt = get_maxlvt();
493
494 apic_pm_state.apic_id = apic_read(APIC_ID);
495 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
496 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
497 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
498 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
499 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
500 if (maxlvt >= 4)
501 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
502 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
503 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
504 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
505 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
506 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
507 #ifdef CONFIG_X86_MCE_INTEL
508 if (maxlvt >= 5)
509 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
510 #endif
511 local_irq_save(flags);
512 disable_local_APIC();
513 local_irq_restore(flags);
514 return 0;
515 }
516
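/*
 * Re-enable the local APIC at mp_lapic_addr through MSR_IA32_APICBASE
 * and restore the state saved by lapic_suspend(), keeping LVTERR masked
 * until the other registers have been rewritten.
 */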
517 static int lapic_resume(struct sys_device *dev)
518 {
519 unsigned int l, h;
520 unsigned long flags;
521 int maxlvt;
522
523 if (!apic_pm_state.active)
524 return 0;
525
526 maxlvt = get_maxlvt();
527
528 local_irq_save(flags);
529 rdmsr(MSR_IA32_APICBASE, l, h);
530 l &= ~MSR_IA32_APICBASE_BASE;
531 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
532 wrmsr(MSR_IA32_APICBASE, l, h);
533 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
534 apic_write(APIC_ID, apic_pm_state.apic_id);
535 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
536 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
537 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
538 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
539 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
540 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
541 #ifdef CONFIG_X86_MCE_INTEL
542 if (maxlvt >= 5)
543 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
544 #endif
545 if (maxlvt >= 4)
546 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
547 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
548 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
549 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
550 apic_write(APIC_ESR, 0);
551 apic_read(APIC_ESR);
552 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
553 apic_write(APIC_ESR, 0);
554 apic_read(APIC_ESR);
555 local_irq_restore(flags);
556 return 0;
557 }
558
559 static struct sysdev_class lapic_sysclass = {
560 set_kset_name("lapic"),
561 .resume = lapic_resume,
562 .suspend = lapic_suspend,
563 };
564
565 static struct sys_device device_lapic = {
566 .id = 0,
567 .cls = &lapic_sysclass,
568 };
569
570 static void __cpuinit apic_pm_activate(void)
571 {
572 apic_pm_state.active = 1;
573 }
574
575 static int __init init_lapic_sysfs(void)
576 {
577 int error;
578 if (!cpu_has_apic)
579 return 0;
580 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
581 error = sysdev_class_register(&lapic_sysclass);
582 if (!error)
583 error = sysdev_register(&device_lapic);
584 return error;
585 }
586 device_initcall(init_lapic_sysfs);
587
588 #else /* CONFIG_PM */
589
590 static void apic_pm_activate(void) { }
591
592 #endif /* CONFIG_PM */
593
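/*
 * Early "apic" parameter: a bare "apic" forces IO-APIC setup
 * (ioapic_force), while "apic=verbose" and "apic=debug" select the
 * APIC message verbosity.
 */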
594 static int __init apic_set_verbosity(char *str)
595 {
596 if (str == NULL) {
597 skip_ioapic_setup = 0;
598 ioapic_force = 1;
599 return 0;
600 }
601 if (strcmp("debug", str) == 0)
602 apic_verbosity = APIC_DEBUG;
603 else if (strcmp("verbose", str) == 0)
604 apic_verbosity = APIC_VERBOSE;
605 else {
606 printk(KERN_WARNING "APIC Verbosity level %s not recognised,"
607 " use apic=verbose or apic=debug\n", str);
608 return -EINVAL;
609 }
610
611 return 0;
612 }
613 early_param("apic", apic_set_verbosity);
614
615 /*
616 * Detect and enable local APICs on non-SMP boards.
617 * Original code written by Keir Fraser.
618 * On AMD64 we trust the BIOS - if it says no APIC it is likely
619 * not correctly set up (usually the APIC timer won't work etc.)
620 */
621
622 static int __init detect_init_APIC (void)
623 {
624 if (!cpu_has_apic) {
625 printk(KERN_INFO "No local APIC present\n");
626 return -1;
627 }
628
629 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
630 boot_cpu_id = 0;
631 return 0;
632 }
633
634 #ifdef CONFIG_X86_IO_APIC
635 static struct resource * __init ioapic_setup_resources(void)
636 {
637 #define IOAPIC_RESOURCE_NAME_SIZE 11
638 unsigned long n;
639 struct resource *res;
640 char *mem;
641 int i;
642
643 if (nr_ioapics <= 0)
644 return NULL;
645
646 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
647 n *= nr_ioapics;
648
649 mem = alloc_bootmem(n);
650 res = (void *)mem;
651
652 if (mem != NULL) {
653 memset(mem, 0, n);
654 mem += sizeof(struct resource) * nr_ioapics;
655
656 for (i = 0; i < nr_ioapics; i++) {
657 res[i].name = mem;
658 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
659 sprintf(mem, "IOAPIC %u", i);
660 mem += IOAPIC_RESOURCE_NAME_SIZE;
661 }
662 }
663
664 ioapic_resources = res;
665
666 return res;
667 }
668
669 static int __init ioapic_insert_resources(void)
670 {
671 int i;
672 struct resource *r = ioapic_resources;
673
674 if (!r) {
675 printk("IO APIC resources could not be allocated.\n");
676 return -1;
677 }
678
679 for (i = 0; i < nr_ioapics; i++) {
680 insert_resource(&iomem_resource, r);
681 r++;
682 }
683
684 return 0;
685 }
686
687 /* Insert the IO APIC resources after PCI initialization has occurred to handle
688 * IO APICs that are mapped in on a BAR in PCI space. */
689 late_initcall(ioapic_insert_resources);
690 #endif
691
692 void __init init_apic_mappings(void)
693 {
694 unsigned long apic_phys;
695
696 /*
697 * If no local APIC can be found then set up a fake all
698 * zeroes page to simulate the local APIC and another
699 * one for the IO-APIC.
700 */
701 if (!smp_found_config && detect_init_APIC()) {
702 apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
703 apic_phys = __pa(apic_phys);
704 } else
705 apic_phys = mp_lapic_addr;
706
707 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
708 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
709 APIC_BASE, apic_phys);
710
711 /* Put local APIC into the resource map. */
712 lapic_resource.start = apic_phys;
713 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
714 insert_resource(&iomem_resource, &lapic_resource);
715
716 /*
717 * Fetch the APIC ID of the BSP in case we have a
718 * default configuration (or the MP table is broken).
719 */
720 boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
721
722 {
723 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
724 int i;
725 struct resource *ioapic_res;
726
727 ioapic_res = ioapic_setup_resources();
728 for (i = 0; i < nr_ioapics; i++) {
729 if (smp_found_config) {
730 ioapic_phys = mp_ioapics[i].mpc_apicaddr;
731 } else {
732 ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
733 ioapic_phys = __pa(ioapic_phys);
734 }
735 set_fixmap_nocache(idx, ioapic_phys);
736 apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
737 __fix_to_virt(idx), ioapic_phys);
738 idx++;
739
740 if (ioapic_res != NULL) {
741 ioapic_res->start = ioapic_phys;
742 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
743 ioapic_res++;
744 }
745 }
746 }
747 }
748
749 /*
750 * This function sets up the local APIC timer, with a timeout of
751 * 'clocks' APIC bus clocks. During calibration we actually call
752 * this function twice on the boot CPU, once with a bogus timeout
753 * value, the second time for real. The other (noncalibrating) CPUs
754 * call this function only once, with the real, calibrated value.
755 *
756 * We do reads before writes even if unnecessary, to get around the
757 * P5 APIC double write bug.
758 */
759
760 #define APIC_DIVISOR 16
761
762 static void __setup_APIC_LVTT(unsigned int clocks)
763 {
764 unsigned int lvtt_value, tmp_value;
765 int cpu = smp_processor_id();
766
767 lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
768
769 if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
770 lvtt_value |= APIC_LVT_MASKED;
771
772 apic_write(APIC_LVTT, lvtt_value);
773
774 /*
775 * Divide PICLK by 16
776 */
777 tmp_value = apic_read(APIC_TDCR);
778 apic_write(APIC_TDCR, (tmp_value
779 & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
780 | APIC_TDR_DIV_16);
781
782 apic_write(APIC_TMICT, clocks/APIC_DIVISOR);
783 }
784
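/*
 * Program this CPU's APIC timer with the given (calibrated) count.
 * First wait for the next HPET comparator update or PIT counter reload
 * so the per-CPU timers start roughly in phase; if the APIC timer is
 * taking over as the main timer (PM-timer case), stop the regular
 * timer interrupt via stop_timer_interrupt().
 */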
785 static void setup_APIC_timer(unsigned int clocks)
786 {
787 unsigned long flags;
788
789 local_irq_save(flags);
790
791 /* wait for irq slice */
792 if (hpet_address && hpet_use_timer) {
793 u32 trigger = hpet_readl(HPET_T0_CMP);
794 while (hpet_readl(HPET_T0_CMP) == trigger)
795 /* do nothing */ ;
796 } else {
797 int c1, c2;
798 outb_p(0x00, 0x43);
799 c2 = inb_p(0x40);
800 c2 |= inb_p(0x40) << 8;
801 do {
802 c1 = c2;
803 outb_p(0x00, 0x43);
804 c2 = inb_p(0x40);
805 c2 |= inb_p(0x40) << 8;
806 } while (c2 - c1 < 300);
807 }
808 __setup_APIC_LVTT(clocks);
809 /* Turn off the PIT interrupt if we use the APIC timer as the main timer.
810 Only works with the PM timer right now;
811 TBD: fix it for HPET too. */
812 if ((pmtmr_ioport != 0) &&
813 smp_processor_id() == boot_cpu_id &&
814 apic_runs_main_timer == 1 &&
815 !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
816 stop_timer_interrupt();
817 apic_runs_main_timer++;
818 }
819 local_irq_restore(flags);
820 }
821
822 /*
823 * In this function we calibrate APIC bus clocks to the external
824 * timer. Unfortunately we cannot use jiffies and the timer irq
825 * to calibrate, since some later bootup code depends on getting
826 * the first irq? Ugh.
827 *
828 * We want to do the calibration only once since we
829 * want to have local timer irqs synchronized. CPUs connected
830 * by the same APIC bus have the very same bus frequency.
831 * And we want to have irqs off anyway, no accidental
832 * APIC irq that way.
833 */
834
835 #define TICK_COUNT 100000000
836
837 static int __init calibrate_APIC_clock(void)
838 {
839 unsigned apic, apic_start;
840 unsigned long tsc, tsc_start;
841 int result;
842 /*
843 * Put whatever arbitrary (but long enough) timeout
844 * value into the APIC clock; we just want to get the
845 * counter running for calibration.
846 */
847 __setup_APIC_LVTT(4000000000);
848
849 apic_start = apic_read(APIC_TMCCT);
850 #ifdef CONFIG_X86_PM_TIMER
851 if (apic_calibrate_pmtmr && pmtmr_ioport) {
852 pmtimer_wait(5000); /* 5ms wait */
853 apic = apic_read(APIC_TMCCT);
854 result = (apic_start - apic) * 1000L / 5;
855 } else
856 #endif
857 {
858 rdtscll(tsc_start);
859
860 do {
861 apic = apic_read(APIC_TMCCT);
862 rdtscll(tsc);
863 } while ((tsc - tsc_start) < TICK_COUNT &&
864 (apic_start - apic) < TICK_COUNT);
865
866 result = (apic_start - apic) * 1000L * tsc_khz /
867 (tsc - tsc_start);
868 }
869 printk("result %d\n", result);
870
871
872 printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
873 result / 1000 / 1000, result / 1000 % 1000);
874
875 return result * APIC_DIVISOR / HZ;
876 }
877
878 static unsigned int calibration_result;
879
880 void __init setup_boot_APIC_clock (void)
881 {
882 if (disable_apic_timer) {
883 printk(KERN_INFO "Disabling APIC timer\n");
884 return;
885 }
886
887 printk(KERN_INFO "Using local APIC timer interrupts.\n");
888 using_apic_timer = 1;
889
890 local_irq_disable();
891
892 calibration_result = calibrate_APIC_clock();
893 /*
894 * Now set up the timer for real.
895 */
896 setup_APIC_timer(calibration_result);
897
898 local_irq_enable();
899 }
900
901 void __cpuinit setup_secondary_APIC_clock(void)
902 {
903 local_irq_disable(); /* FIXME: Do we need this? --RR */
904 setup_APIC_timer(calibration_result);
905 local_irq_enable();
906 }
907
908 void disable_APIC_timer(void)
909 {
910 if (using_apic_timer) {
911 unsigned long v;
912
913 v = apic_read(APIC_LVTT);
914 /*
915 * When an illegal vector value (0-15) is written to an LVT
916 * entry and delivery mode is Fixed, the APIC may signal an
917 * illegal vector error, without regard to whether the mask
918 * bit is set or whether an interrupt is actually seen on input.
919 *
920 * The boot sequence might call this function when the LVTT has a
921 * '0' vector value. So make sure the vector field is set to a
922 * valid value.
923 */
924 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
925 apic_write(APIC_LVTT, v);
926 }
927 }
928
929 void enable_APIC_timer(void)
930 {
931 int cpu = smp_processor_id();
932
933 if (using_apic_timer &&
934 !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
935 unsigned long v;
936
937 v = apic_read(APIC_LVTT);
938 apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
939 }
940 }
941
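/*
 * If this CPU is in the passed cpumask, mask its local APIC timer and
 * add it to timer_interrupt_broadcast_ipi_mask so it receives its timer
 * ticks as broadcast IPIs instead, typically because the APIC timer
 * may stop in deep C-states.
 */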
942 void switch_APIC_timer_to_ipi(void *cpumask)
943 {
944 cpumask_t mask = *(cpumask_t *)cpumask;
945 int cpu = smp_processor_id();
946
947 if (cpu_isset(cpu, mask) &&
948 !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
949 disable_APIC_timer();
950 cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
951 }
952 }
953 EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
954
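/*
 * Deliver a timer tick to the CPUs in timer_interrupt_broadcast_ipi_mask:
 * handle this CPU directly if it is one of them, then send
 * LOCAL_TIMER_VECTOR IPIs to the remaining online CPUs in the mask.
 */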
955 void smp_send_timer_broadcast_ipi(void)
956 {
957 int cpu = smp_processor_id();
958 cpumask_t mask;
959
960 cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
961
962 if (cpu_isset(cpu, mask)) {
963 cpu_clear(cpu, mask);
964 add_pda(apic_timer_irqs, 1);
965 smp_local_timer_interrupt();
966 }
967
968 if (!cpus_empty(mask)) {
969 send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
970 }
971 }
972
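/*
 * Undo switch_APIC_timer_to_ipi(): remove this CPU from the broadcast
 * mask and unmask its local APIC timer again.
 */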
973 void switch_ipi_to_APIC_timer(void *cpumask)
974 {
975 cpumask_t mask = *(cpumask_t *)cpumask;
976 int cpu = smp_processor_id();
977
978 if (cpu_isset(cpu, mask) &&
979 cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
980 cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
981 enable_APIC_timer();
982 }
983 }
984 EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
985
986 int setup_profiling_timer(unsigned int multiplier)
987 {
988 return -EINVAL;
989 }
990
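/*
 * Program one of the AMD K8 extended LVT registers (spaced 16 bytes
 * apart from K8_APIC_EXT_LVT_BASE) with the given vector, message type
 * and mask bit.
 */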
991 void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
992 unsigned char msg_type, unsigned char mask)
993 {
994 unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
995 unsigned int v = (mask << 16) | (msg_type << 8) | vector;
996 apic_write(reg, v);
997 }
998
999 #undef APIC_DIVISOR
1000
1001 /*
1002 * Local timer interrupt handler. It does both profiling and
1003 * process statistics/rescheduling.
1004 *
1005 * We do profiling in every local tick, statistics/rescheduling
1006 * happen only every 'profiling multiplier' ticks. The default
1007 * multiplier is 1 and it can be changed by writing the new multiplier
1008 * value into /proc/profile.
1009 */
1010
1011 void smp_local_timer_interrupt(void)
1012 {
1013 profile_tick(CPU_PROFILING);
1014 #ifdef CONFIG_SMP
1015 update_process_times(user_mode(get_irq_regs()));
1016 #endif
1017 if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
1018 main_timer_handler();
1019 /*
1020 * We take the 'long' return path, and there every subsystem
1021 * grabs the appropriate locks (kernel lock/ irq lock).
1022 *
1023 * We might want to decouple profiling from the 'long path',
1024 * and do the profiling totally in assembly.
1025 *
1026 * Currently this isn't too much of an issue (performance wise),
1027 * we can take more than 100K local irqs per second on a 100 MHz P5.
1028 */
1029 }
1030
1031 /*
1032 * Local APIC timer interrupt. This is the most natural way for doing
1033 * local interrupts, but local timer interrupts can be emulated by
1034 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
1035 *
1036 * [ if a single-CPU system runs an SMP kernel then we call the local
1037 * interrupt as well. Thus we cannot inline the local irq ... ]
1038 */
1039 void smp_apic_timer_interrupt(struct pt_regs *regs)
1040 {
1041 struct pt_regs *old_regs = set_irq_regs(regs);
1042
1043 /*
1044 * the NMI deadlock-detector uses this.
1045 */
1046 add_pda(apic_timer_irqs, 1);
1047
1048 /*
1049 * NOTE! We'd better ACK the irq immediately,
1050 * because timer handling can be slow.
1051 */
1052 ack_APIC_irq();
1053 /*
1054 * update_process_times() expects us to have done irq_enter().
1055 * Besides, if we don't, timer interrupts ignore the global
1056 * interrupt lock, which is the WrongThing (tm) to do.
1057 */
1058 exit_idle();
1059 irq_enter();
1060 smp_local_timer_interrupt();
1061 irq_exit();
1062 set_irq_regs(old_regs);
1063 }
1064
1065 /*
1066 * apic_is_clustered_box() -- Check if we can expect good TSC
1067 *
1068 * Thus far, the major user of this is IBM's Summit2 series:
1069 *
1070 * Clustered boxes may have unsynced TSC problems if they are
1071 * multi-chassis. Use available data to take a good guess.
1072 * If in doubt, go HPET.
1073 */
1074 __cpuinit int apic_is_clustered_box(void)
1075 {
1076 int i, clusters, zeros;
1077 unsigned id;
1078 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
1079
1080 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
1081
1082 for (i = 0; i < NR_CPUS; i++) {
1083 id = bios_cpu_apicid[i];
1084 if (id != BAD_APICID)
1085 __set_bit(APIC_CLUSTERID(id), clustermap);
1086 }
1087
1088 /* Problem: Partially populated chassis may not have CPUs in some of
1089 * the APIC clusters they have been allocated. Only present CPUs have
1090 * bios_cpu_apicid entries, thus causing zeroes in the bitmap. Since
1091 * clusters are allocated sequentially, count zeros only if they are
1092 * bounded by ones.
1093 */
1094 clusters = 0;
1095 zeros = 0;
1096 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
1097 if (test_bit(i, clustermap)) {
1098 clusters += 1 + zeros;
1099 zeros = 0;
1100 } else
1101 ++zeros;
1102 }
1103
1104 /*
1105 * If clusters > 2, then it should be multi-chassis.
1106 * May have to revisit this when multi-core + hyperthreaded CPUs come
1107 * out, but AFAIK this will work even for them.
1108 */
1109 return (clusters > 2);
1110 }
1111
1112 /*
1113 * This interrupt should _never_ happen with our APIC/SMP architecture
1114 */
1115 asmlinkage void smp_spurious_interrupt(void)
1116 {
1117 unsigned int v;
1118 exit_idle();
1119 irq_enter();
1120 /*
1121 * Check if this really is a spurious interrupt and ACK it
1122 * if it is a vectored one. Just in case...
1123 * Spurious interrupts should not be ACKed.
1124 */
1125 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1126 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1127 ack_APIC_irq();
1128
1129 irq_exit();
1130 }
1131
1132 /*
1133 * This interrupt should never happen with our APIC/SMP architecture
1134 */
1135
1136 asmlinkage void smp_error_interrupt(void)
1137 {
1138 unsigned int v, v1;
1139
1140 exit_idle();
1141 irq_enter();
1142 /* First tickle the hardware, only then report what went on. -- REW */
1143 v = apic_read(APIC_ESR);
1144 apic_write(APIC_ESR, 0);
1145 v1 = apic_read(APIC_ESR);
1146 ack_APIC_irq();
1147 atomic_inc(&irq_err_count);
1148
1149 /* Here is what the APIC error bits mean:
1150 0: Send CS error
1151 1: Receive CS error
1152 2: Send accept error
1153 3: Receive accept error
1154 4: Reserved
1155 5: Send illegal vector
1156 6: Received illegal vector
1157 7: Illegal register address
1158 */
1159 printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
1160 smp_processor_id(), v, v1);
1161 irq_exit();
1162 }
1163
1164 int disable_apic;
1165
1166 /*
1167 * This initializes the IO-APIC and APIC hardware if this is
1168 * a UP kernel.
1169 */
1170 int __init APIC_init_uniprocessor (void)
1171 {
1172 if (disable_apic) {
1173 printk(KERN_INFO "Apic disabled\n");
1174 return -1;
1175 }
1176 if (!cpu_has_apic) {
1177 disable_apic = 1;
1178 printk(KERN_INFO "Apic disabled by BIOS\n");
1179 return -1;
1180 }
1181
1182 verify_local_APIC();
1183
1184 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
1185 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
1186
1187 setup_local_APIC();
1188
1189 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1190 setup_IO_APIC();
1191 else
1192 nr_ioapics = 0;
1193 setup_boot_APIC_clock();
1194 check_nmi_watchdog();
1195 return 0;
1196 }
1197
1198 static __init int setup_disableapic(char *str)
1199 {
1200 disable_apic = 1;
1201 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
1202 return 0;
1203 }
1204 early_param("disableapic", setup_disableapic);
1205
1206 /* same as disableapic, for compatibility */
1207 static __init int setup_nolapic(char *str)
1208 {
1209 return setup_disableapic(str);
1210 }
1211 early_param("nolapic", setup_nolapic);
1212
1213 static int __init parse_lapic_timer_c2_ok(char *arg)
1214 {
1215 local_apic_timer_c2_ok = 1;
1216 return 0;
1217 }
1218 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
1219
1220 static __init int setup_noapictimer(char *str)
1221 {
1222 if (str[0] != ' ' && str[0] != 0)
1223 return 0;
1224 disable_apic_timer = 1;
1225 return 1;
1226 }
1227
1228 static __init int setup_apicmaintimer(char *str)
1229 {
1230 apic_runs_main_timer = 1;
1231 nohpet = 1;
1232 return 1;
1233 }
1234 __setup("apicmaintimer", setup_apicmaintimer);
1235
1236 static __init int setup_noapicmaintimer(char *str)
1237 {
1238 apic_runs_main_timer = -1;
1239 return 1;
1240 }
1241 __setup("noapicmaintimer", setup_noapicmaintimer);
1242
1243 static __init int setup_apicpmtimer(char *s)
1244 {
1245 apic_calibrate_pmtmr = 1;
1246 notsc_setup(NULL);
1247 return setup_apicmaintimer(NULL);
1248 }
1249 __setup("apicpmtimer", setup_apicpmtimer);
1250
1251 __setup("noapictimer", setup_noapictimer);
1252