]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - arch/x86/kernel/apic/apic.c
x86, irq: Clean up irqdomain transition code
[mirror_ubuntu-zesty-kernel.git] / arch / x86 / kernel / apic / apic.c
1 /*
2 * Local APIC handling, local APIC timers
3 *
4 * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
5 *
6 * Fixes
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
9 * and Rolf G. Tews
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
13 * Pavel Machek and
14 * Mikael Pettersson : PM converted to driver model.
15 */
16
17 #include <linux/perf_event.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/mc146818rtc.h>
20 #include <linux/acpi_pmtmr.h>
21 #include <linux/clockchips.h>
22 #include <linux/interrupt.h>
23 #include <linux/bootmem.h>
24 #include <linux/ftrace.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/delay.h>
29 #include <linux/timex.h>
30 #include <linux/i8253.h>
31 #include <linux/dmar.h>
32 #include <linux/init.h>
33 #include <linux/cpu.h>
34 #include <linux/dmi.h>
35 #include <linux/smp.h>
36 #include <linux/mm.h>
37
38 #include <asm/trace/irq_vectors.h>
39 #include <asm/irq_remapping.h>
40 #include <asm/perf_event.h>
41 #include <asm/x86_init.h>
42 #include <asm/pgalloc.h>
43 #include <linux/atomic.h>
44 #include <asm/mpspec.h>
45 #include <asm/i8259.h>
46 #include <asm/proto.h>
47 #include <asm/apic.h>
48 #include <asm/io_apic.h>
49 #include <asm/desc.h>
50 #include <asm/hpet.h>
51 #include <asm/idle.h>
52 #include <asm/mtrr.h>
53 #include <asm/time.h>
54 #include <asm/smp.h>
55 #include <asm/mce.h>
56 #include <asm/tsc.h>
57 #include <asm/hypervisor.h>
58
59 unsigned int num_processors;
60
61 unsigned disabled_cpus;
62
63 /* Processor that is doing the boot up */
64 unsigned int boot_cpu_physical_apicid = -1U;
65 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
66
67 /*
68 * The highest APIC ID seen during enumeration.
69 */
70 static unsigned int max_physical_apicid;
71
72 /*
73 * Bitmask of physically existing CPUs:
74 */
75 physid_mask_t phys_cpu_present_map;
76
77 /*
78 * Processor to be disabled specified by kernel parameter
79 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
80 * avoid undefined behaviour caused by sending INIT from AP to BSP.
81 */
82 static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
83
84 /*
85 * Map cpu index to physical APIC ID
86 */
87 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
88 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
89 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
90 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
91
92 #ifdef CONFIG_X86_32
93
94 /*
95 * On x86_32, the mapping between cpu and logical apicid may vary
96 * depending on apic in use. The following early percpu variable is
97 * used for the mapping. This is where the behaviors of x86_64 and 32
98 * actually diverge. Let's keep it ugly for now.
99 */
100 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
101
102 /* Local APIC was disabled by the BIOS and enabled by the kernel */
103 static int enabled_via_apicbase;
104
105 /*
106 * Handle interrupt mode configuration register (IMCR).
107 * This register controls whether the interrupt signals
108 * that reach the BSP come from the master PIC or from the
109 * local APIC. Before entering Symmetric I/O Mode, either
110 * the BIOS or the operating system must switch out of
111 * PIC Mode by changing the IMCR.
112 */
113 static inline void imcr_pic_to_apic(void)
114 {
115 /* select IMCR register */
116 outb(0x70, 0x22);
117 /* NMI and 8259 INTR go through APIC */
118 outb(0x01, 0x23);
119 }
120
121 static inline void imcr_apic_to_pic(void)
122 {
123 /* select IMCR register */
124 outb(0x70, 0x22);
125 /* NMI and 8259 INTR go directly to BSP */
126 outb(0x00, 0x23);
127 }
128 #endif
129
130 /*
131 * Knob to control our willingness to enable the local APIC.
132 *
133 * +1=force-enable
134 */
135 static int force_enable_local_apic __initdata;
136
137 /* Control whether x2APIC mode is enabled or not */
138 static bool nox2apic __initdata;
139
140 /*
141 * APIC command line parameters
142 */
143 static int __init parse_lapic(char *arg)
144 {
145 if (config_enabled(CONFIG_X86_32) && !arg)
146 force_enable_local_apic = 1;
147 else if (arg && !strncmp(arg, "notscdeadline", 13))
148 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
149 return 0;
150 }
151 early_param("lapic", parse_lapic);
152
153 #ifdef CONFIG_X86_64
154 static int apic_calibrate_pmtmr __initdata;
155 static __init int setup_apicpmtimer(char *s)
156 {
157 apic_calibrate_pmtmr = 1;
158 notsc_setup(NULL);
159 return 0;
160 }
161 __setup("apicpmtimer", setup_apicpmtimer);
162 #endif
163
164 int x2apic_mode;
165 #ifdef CONFIG_X86_X2APIC
166 /* x2apic enabled before OS handover */
167 int x2apic_preenabled;
168 static int x2apic_disabled;
169 static int __init setup_nox2apic(char *str)
170 {
171 if (x2apic_enabled()) {
172 int apicid = native_apic_msr_read(APIC_ID);
173
174 if (apicid >= 255) {
175 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
176 apicid);
177 return 0;
178 }
179
180 pr_warning("x2apic already enabled. will disable it\n");
181 } else
182 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
183
184 nox2apic = true;
185
186 return 0;
187 }
188 early_param("nox2apic", setup_nox2apic);
189 #endif
190
191 unsigned long mp_lapic_addr;
192 int disable_apic;
193 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
194 static int disable_apic_timer __initdata;
195 /* Local APIC timer works in C2 */
196 int local_apic_timer_c2_ok;
197 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
198
199 int first_system_vector = 0xfe;
200
201 /*
202 * Debug level, exported for io_apic.c
203 */
204 unsigned int apic_verbosity;
205
206 int pic_mode;
207
208 /* Have we found an MP table */
209 int smp_found_config;
210
211 static struct resource lapic_resource = {
212 .name = "Local APIC",
213 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
214 };
215
216 unsigned int lapic_timer_frequency = 0;
217
218 static void apic_pm_activate(void);
219
220 static unsigned long apic_phys;
221
222 /*
223 * Get the LAPIC version
224 */
225 static inline int lapic_get_version(void)
226 {
227 return GET_APIC_VERSION(apic_read(APIC_LVR));
228 }
229
230 /*
231 * Check, if the APIC is integrated or a separate chip
232 */
233 static inline int lapic_is_integrated(void)
234 {
235 #ifdef CONFIG_X86_64
236 return 1;
237 #else
238 return APIC_INTEGRATED(lapic_get_version());
239 #endif
240 }
241
242 /*
243 * Check, whether this is a modern or a first generation APIC
244 */
245 static int modern_apic(void)
246 {
247 /* AMD systems use old APIC versions, so check the CPU */
248 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
249 boot_cpu_data.x86 >= 0xf)
250 return 1;
251 return lapic_get_version() >= 0x14;
252 }
253
254 /*
255 * right after this call apic become NOOP driven
256 * so apic->write/read doesn't do anything
257 */
258 static void __init apic_disable(void)
259 {
260 pr_info("APIC: switched to apic NOOP\n");
261 apic = &apic_noop;
262 }
263
264 void native_apic_wait_icr_idle(void)
265 {
266 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
267 cpu_relax();
268 }
269
270 u32 native_safe_apic_wait_icr_idle(void)
271 {
272 u32 send_status;
273 int timeout;
274
275 timeout = 0;
276 do {
277 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
278 if (!send_status)
279 break;
280 inc_irq_stat(icr_read_retry_count);
281 udelay(100);
282 } while (timeout++ < 1000);
283
284 return send_status;
285 }
286
287 void native_apic_icr_write(u32 low, u32 id)
288 {
289 unsigned long flags;
290
291 local_irq_save(flags);
292 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
293 apic_write(APIC_ICR, low);
294 local_irq_restore(flags);
295 }
296
297 u64 native_apic_icr_read(void)
298 {
299 u32 icr1, icr2;
300
301 icr2 = apic_read(APIC_ICR2);
302 icr1 = apic_read(APIC_ICR);
303
304 return icr1 | ((u64)icr2 << 32);
305 }
306
307 #ifdef CONFIG_X86_32
308 /**
309 * get_physical_broadcast - Get number of physical broadcast IDs
310 */
311 int get_physical_broadcast(void)
312 {
313 return modern_apic() ? 0xff : 0xf;
314 }
315 #endif
316
317 /**
318 * lapic_get_maxlvt - get the maximum number of local vector table entries
319 */
320 int lapic_get_maxlvt(void)
321 {
322 unsigned int v;
323
324 v = apic_read(APIC_LVR);
325 /*
326 * - we always have APIC integrated on 64bit mode
327 * - 82489DXs do not report # of LVT entries
328 */
329 return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
330 }
331
332 /*
333 * Local APIC timer
334 */
335
336 /* Clock divisor */
337 #define APIC_DIVISOR 16
338 #define TSC_DIVISOR 32
339
340 /*
341 * This function sets up the local APIC timer, with a timeout of
342 * 'clocks' APIC bus clock. During calibration we actually call
343 * this function twice on the boot CPU, once with a bogus timeout
344 * value, second time for real. The other (noncalibrating) CPUs
345 * call this function only once, with the real, calibrated value.
346 *
347 * We do reads before writes even if unnecessary, to get around the
348 * P5 APIC double write bug.
349 */
350 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
351 {
352 unsigned int lvtt_value, tmp_value;
353
354 lvtt_value = LOCAL_TIMER_VECTOR;
355 if (!oneshot)
356 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
357 else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
358 lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
359
360 if (!lapic_is_integrated())
361 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
362
363 if (!irqen)
364 lvtt_value |= APIC_LVT_MASKED;
365
366 apic_write(APIC_LVTT, lvtt_value);
367
368 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
369 printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
370 return;
371 }
372
373 /*
374 * Divide PICLK by 16
375 */
376 tmp_value = apic_read(APIC_TDCR);
377 apic_write(APIC_TDCR,
378 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
379 APIC_TDR_DIV_16);
380
381 if (!oneshot)
382 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
383 }
384
385 /*
386 * Setup extended LVT, AMD specific
387 *
388 * Software should use the LVT offsets the BIOS provides. The offsets
389 * are determined by the subsystems using it like those for MCE
390 * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
391 * are supported. Beginning with family 10h at least 4 offsets are
392 * available.
393 *
394 * Since the offsets must be consistent for all cores, we keep track
395 * of the LVT offsets in software and reserve the offset for the same
396 * vector also to be used on other cores. An offset is freed by
397 * setting the entry to APIC_EILVT_MASKED.
398 *
399 * If the BIOS is right, there should be no conflicts. Otherwise a
400 * "[Firmware Bug]: ..." error message is generated. However, if
401 * software does not properly determines the offsets, it is not
402 * necessarily a BIOS bug.
403 */
404
405 static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
406
407 static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
408 {
409 return (old & APIC_EILVT_MASKED)
410 || (new == APIC_EILVT_MASKED)
411 || ((new & ~APIC_EILVT_MASKED) == old);
412 }
413
414 static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
415 {
416 unsigned int rsvd, vector;
417
418 if (offset >= APIC_EILVT_NR_MAX)
419 return ~0;
420
421 rsvd = atomic_read(&eilvt_offsets[offset]);
422 do {
423 vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
424 if (vector && !eilvt_entry_is_changeable(vector, new))
425 /* may not change if vectors are different */
426 return rsvd;
427 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
428 } while (rsvd != new);
429
430 rsvd &= ~APIC_EILVT_MASKED;
431 if (rsvd && rsvd != vector)
432 pr_info("LVT offset %d assigned for vector 0x%02x\n",
433 offset, rsvd);
434
435 return new;
436 }
437
438 /*
439 * If mask=1, the LVT entry does not generate interrupts while mask=0
440 * enables the vector. See also the BKDGs. Must be called with
441 * preemption disabled.
442 */
443
444 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
445 {
446 unsigned long reg = APIC_EILVTn(offset);
447 unsigned int new, old, reserved;
448
449 new = (mask << 16) | (msg_type << 8) | vector;
450 old = apic_read(reg);
451 reserved = reserve_eilvt_offset(offset, new);
452
453 if (reserved != new) {
454 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
455 "vector 0x%x, but the register is already in use for "
456 "vector 0x%x on another cpu\n",
457 smp_processor_id(), reg, offset, new, reserved);
458 return -EINVAL;
459 }
460
461 if (!eilvt_entry_is_changeable(old, new)) {
462 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
463 "vector 0x%x, but the register is already in use for "
464 "vector 0x%x on this cpu\n",
465 smp_processor_id(), reg, offset, new, old);
466 return -EBUSY;
467 }
468
469 apic_write(reg, new);
470
471 return 0;
472 }
473 EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
474
475 /*
476 * Program the next event, relative to now
477 */
478 static int lapic_next_event(unsigned long delta,
479 struct clock_event_device *evt)
480 {
481 apic_write(APIC_TMICT, delta);
482 return 0;
483 }
484
485 static int lapic_next_deadline(unsigned long delta,
486 struct clock_event_device *evt)
487 {
488 u64 tsc;
489
490 rdtscll(tsc);
491 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
492 return 0;
493 }
494
495 /*
496 * Setup the lapic timer in periodic or oneshot mode
497 */
498 static void lapic_timer_setup(enum clock_event_mode mode,
499 struct clock_event_device *evt)
500 {
501 unsigned long flags;
502 unsigned int v;
503
504 /* Lapic used as dummy for broadcast ? */
505 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
506 return;
507
508 local_irq_save(flags);
509
510 switch (mode) {
511 case CLOCK_EVT_MODE_PERIODIC:
512 case CLOCK_EVT_MODE_ONESHOT:
513 __setup_APIC_LVTT(lapic_timer_frequency,
514 mode != CLOCK_EVT_MODE_PERIODIC, 1);
515 break;
516 case CLOCK_EVT_MODE_UNUSED:
517 case CLOCK_EVT_MODE_SHUTDOWN:
518 v = apic_read(APIC_LVTT);
519 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
520 apic_write(APIC_LVTT, v);
521 apic_write(APIC_TMICT, 0);
522 break;
523 case CLOCK_EVT_MODE_RESUME:
524 /* Nothing to do here */
525 break;
526 }
527
528 local_irq_restore(flags);
529 }
530
531 /*
532 * Local APIC timer broadcast function
533 */
534 static void lapic_timer_broadcast(const struct cpumask *mask)
535 {
536 #ifdef CONFIG_SMP
537 apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
538 #endif
539 }
540
541
542 /*
543 * The local apic timer can be used for any function which is CPU local.
544 */
545 static struct clock_event_device lapic_clockevent = {
546 .name = "lapic",
547 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
548 | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
549 .shift = 32,
550 .set_mode = lapic_timer_setup,
551 .set_next_event = lapic_next_event,
552 .broadcast = lapic_timer_broadcast,
553 .rating = 100,
554 .irq = -1,
555 };
556 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
557
558 /*
559 * Setup the local APIC timer for this CPU. Copy the initialized values
560 * of the boot CPU and register the clock event in the framework.
561 */
562 static void setup_APIC_timer(void)
563 {
564 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
565
566 if (this_cpu_has(X86_FEATURE_ARAT)) {
567 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
568 /* Make LAPIC timer preferrable over percpu HPET */
569 lapic_clockevent.rating = 150;
570 }
571
572 memcpy(levt, &lapic_clockevent, sizeof(*levt));
573 levt->cpumask = cpumask_of(smp_processor_id());
574
575 if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
576 levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
577 CLOCK_EVT_FEAT_DUMMY);
578 levt->set_next_event = lapic_next_deadline;
579 clockevents_config_and_register(levt,
580 (tsc_khz / TSC_DIVISOR) * 1000,
581 0xF, ~0UL);
582 } else
583 clockevents_register_device(levt);
584 }
585
586 /*
587 * In this functions we calibrate APIC bus clocks to the external timer.
588 *
589 * We want to do the calibration only once since we want to have local timer
590 * irqs syncron. CPUs connected by the same APIC bus have the very same bus
591 * frequency.
592 *
593 * This was previously done by reading the PIT/HPET and waiting for a wrap
594 * around to find out, that a tick has elapsed. I have a box, where the PIT
595 * readout is broken, so it never gets out of the wait loop again. This was
596 * also reported by others.
597 *
598 * Monitoring the jiffies value is inaccurate and the clockevents
599 * infrastructure allows us to do a simple substitution of the interrupt
600 * handler.
601 *
602 * The calibration routine also uses the pm_timer when possible, as the PIT
603 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
604 * back to normal later in the boot process).
605 */
606
607 #define LAPIC_CAL_LOOPS (HZ/10)
608
609 static __initdata int lapic_cal_loops = -1;
610 static __initdata long lapic_cal_t1, lapic_cal_t2;
611 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
612 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
613 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
614
615 /*
616 * Temporary interrupt handler.
617 */
618 static void __init lapic_cal_handler(struct clock_event_device *dev)
619 {
620 unsigned long long tsc = 0;
621 long tapic = apic_read(APIC_TMCCT);
622 unsigned long pm = acpi_pm_read_early();
623
624 if (cpu_has_tsc)
625 rdtscll(tsc);
626
627 switch (lapic_cal_loops++) {
628 case 0:
629 lapic_cal_t1 = tapic;
630 lapic_cal_tsc1 = tsc;
631 lapic_cal_pm1 = pm;
632 lapic_cal_j1 = jiffies;
633 break;
634
635 case LAPIC_CAL_LOOPS:
636 lapic_cal_t2 = tapic;
637 lapic_cal_tsc2 = tsc;
638 if (pm < lapic_cal_pm1)
639 pm += ACPI_PM_OVRRUN;
640 lapic_cal_pm2 = pm;
641 lapic_cal_j2 = jiffies;
642 break;
643 }
644 }
645
646 static int __init
647 calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
648 {
649 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
650 const long pm_thresh = pm_100ms / 100;
651 unsigned long mult;
652 u64 res;
653
654 #ifndef CONFIG_X86_PM_TIMER
655 return -1;
656 #endif
657
658 apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
659
660 /* Check, if the PM timer is available */
661 if (!deltapm)
662 return -1;
663
664 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
665
666 if (deltapm > (pm_100ms - pm_thresh) &&
667 deltapm < (pm_100ms + pm_thresh)) {
668 apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
669 return 0;
670 }
671
672 res = (((u64)deltapm) * mult) >> 22;
673 do_div(res, 1000000);
674 pr_warning("APIC calibration not consistent "
675 "with PM-Timer: %ldms instead of 100ms\n",(long)res);
676
677 /* Correct the lapic counter value */
678 res = (((u64)(*delta)) * pm_100ms);
679 do_div(res, deltapm);
680 pr_info("APIC delta adjusted to PM-Timer: "
681 "%lu (%ld)\n", (unsigned long)res, *delta);
682 *delta = (long)res;
683
684 /* Correct the tsc counter value */
685 if (cpu_has_tsc) {
686 res = (((u64)(*deltatsc)) * pm_100ms);
687 do_div(res, deltapm);
688 apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
689 "PM-Timer: %lu (%ld)\n",
690 (unsigned long)res, *deltatsc);
691 *deltatsc = (long)res;
692 }
693
694 return 0;
695 }
696
697 static int __init calibrate_APIC_clock(void)
698 {
699 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
700 void (*real_handler)(struct clock_event_device *dev);
701 unsigned long deltaj;
702 long delta, deltatsc;
703 int pm_referenced = 0;
704
705 /**
706 * check if lapic timer has already been calibrated by platform
707 * specific routine, such as tsc calibration code. if so, we just fill
708 * in the clockevent structure and return.
709 */
710
711 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
712 return 0;
713 } else if (lapic_timer_frequency) {
714 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
715 lapic_timer_frequency);
716 lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
717 TICK_NSEC, lapic_clockevent.shift);
718 lapic_clockevent.max_delta_ns =
719 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
720 lapic_clockevent.min_delta_ns =
721 clockevent_delta2ns(0xF, &lapic_clockevent);
722 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
723 return 0;
724 }
725
726 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
727 "calibrating APIC timer ...\n");
728
729 local_irq_disable();
730
731 /* Replace the global interrupt handler */
732 real_handler = global_clock_event->event_handler;
733 global_clock_event->event_handler = lapic_cal_handler;
734
735 /*
736 * Setup the APIC counter to maximum. There is no way the lapic
737 * can underflow in the 100ms detection time frame
738 */
739 __setup_APIC_LVTT(0xffffffff, 0, 0);
740
741 /* Let the interrupts run */
742 local_irq_enable();
743
744 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
745 cpu_relax();
746
747 local_irq_disable();
748
749 /* Restore the real event handler */
750 global_clock_event->event_handler = real_handler;
751
752 /* Build delta t1-t2 as apic timer counts down */
753 delta = lapic_cal_t1 - lapic_cal_t2;
754 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
755
756 deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
757
758 /* we trust the PM based calibration if possible */
759 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
760 &delta, &deltatsc);
761
762 /* Calculate the scaled math multiplication factor */
763 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
764 lapic_clockevent.shift);
765 lapic_clockevent.max_delta_ns =
766 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
767 lapic_clockevent.min_delta_ns =
768 clockevent_delta2ns(0xF, &lapic_clockevent);
769
770 lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
771
772 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
773 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
774 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
775 lapic_timer_frequency);
776
777 if (cpu_has_tsc) {
778 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
779 "%ld.%04ld MHz.\n",
780 (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
781 (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
782 }
783
784 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
785 "%u.%04u MHz.\n",
786 lapic_timer_frequency / (1000000 / HZ),
787 lapic_timer_frequency % (1000000 / HZ));
788
789 /*
790 * Do a sanity check on the APIC calibration result
791 */
792 if (lapic_timer_frequency < (1000000 / HZ)) {
793 local_irq_enable();
794 pr_warning("APIC frequency too slow, disabling apic timer\n");
795 return -1;
796 }
797
798 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
799
800 /*
801 * PM timer calibration failed or not turned on
802 * so lets try APIC timer based calibration
803 */
804 if (!pm_referenced) {
805 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
806
807 /*
808 * Setup the apic timer manually
809 */
810 levt->event_handler = lapic_cal_handler;
811 lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
812 lapic_cal_loops = -1;
813
814 /* Let the interrupts run */
815 local_irq_enable();
816
817 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
818 cpu_relax();
819
820 /* Stop the lapic timer */
821 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
822
823 /* Jiffies delta */
824 deltaj = lapic_cal_j2 - lapic_cal_j1;
825 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
826
827 /* Check, if the jiffies result is consistent */
828 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
829 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
830 else
831 levt->features |= CLOCK_EVT_FEAT_DUMMY;
832 } else
833 local_irq_enable();
834
835 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
836 pr_warning("APIC timer disabled due to verification failure\n");
837 return -1;
838 }
839
840 return 0;
841 }
842
843 /*
844 * Setup the boot APIC
845 *
846 * Calibrate and verify the result.
847 */
848 void __init setup_boot_APIC_clock(void)
849 {
850 /*
851 * The local apic timer can be disabled via the kernel
852 * commandline or from the CPU detection code. Register the lapic
853 * timer as a dummy clock event source on SMP systems, so the
854 * broadcast mechanism is used. On UP systems simply ignore it.
855 */
856 if (disable_apic_timer) {
857 pr_info("Disabling APIC timer\n");
858 /* No broadcast on UP ! */
859 if (num_possible_cpus() > 1) {
860 lapic_clockevent.mult = 1;
861 setup_APIC_timer();
862 }
863 return;
864 }
865
866 if (calibrate_APIC_clock()) {
867 /* No broadcast on UP ! */
868 if (num_possible_cpus() > 1)
869 setup_APIC_timer();
870 return;
871 }
872
873 /*
874 * If nmi_watchdog is set to IO_APIC, we need the
875 * PIT/HPET going. Otherwise register lapic as a dummy
876 * device.
877 */
878 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
879
880 /* Setup the lapic or request the broadcast */
881 setup_APIC_timer();
882 }
883
884 void setup_secondary_APIC_clock(void)
885 {
886 setup_APIC_timer();
887 }
888
889 /*
890 * The guts of the apic timer interrupt
891 */
892 static void local_apic_timer_interrupt(void)
893 {
894 int cpu = smp_processor_id();
895 struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
896
897 /*
898 * Normally we should not be here till LAPIC has been initialized but
899 * in some cases like kdump, its possible that there is a pending LAPIC
900 * timer interrupt from previous kernel's context and is delivered in
901 * new kernel the moment interrupts are enabled.
902 *
903 * Interrupts are enabled early and LAPIC is setup much later, hence
904 * its possible that when we get here evt->event_handler is NULL.
905 * Check for event_handler being NULL and discard the interrupt as
906 * spurious.
907 */
908 if (!evt->event_handler) {
909 pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
910 /* Switch it off */
911 lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
912 return;
913 }
914
915 /*
916 * the NMI deadlock-detector uses this.
917 */
918 inc_irq_stat(apic_timer_irqs);
919
920 evt->event_handler(evt);
921 }
922
923 /*
924 * Local APIC timer interrupt. This is the most natural way for doing
925 * local interrupts, but local timer interrupts can be emulated by
926 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
927 *
928 * [ if a single-CPU system runs an SMP kernel then we call the local
929 * interrupt as well. Thus we cannot inline the local irq ... ]
930 */
931 __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
932 {
933 struct pt_regs *old_regs = set_irq_regs(regs);
934
935 /*
936 * NOTE! We'd better ACK the irq immediately,
937 * because timer handling can be slow.
938 *
939 * update_process_times() expects us to have done irq_enter().
940 * Besides, if we don't timer interrupts ignore the global
941 * interrupt lock, which is the WrongThing (tm) to do.
942 */
943 entering_ack_irq();
944 local_apic_timer_interrupt();
945 exiting_irq();
946
947 set_irq_regs(old_regs);
948 }
949
950 __visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
951 {
952 struct pt_regs *old_regs = set_irq_regs(regs);
953
954 /*
955 * NOTE! We'd better ACK the irq immediately,
956 * because timer handling can be slow.
957 *
958 * update_process_times() expects us to have done irq_enter().
959 * Besides, if we don't timer interrupts ignore the global
960 * interrupt lock, which is the WrongThing (tm) to do.
961 */
962 entering_ack_irq();
963 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
964 local_apic_timer_interrupt();
965 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
966 exiting_irq();
967
968 set_irq_regs(old_regs);
969 }
970
971 int setup_profiling_timer(unsigned int multiplier)
972 {
973 return -EINVAL;
974 }
975
976 /*
977 * Local APIC start and shutdown
978 */
979
980 /**
981 * clear_local_APIC - shutdown the local APIC
982 *
983 * This is called, when a CPU is disabled and before rebooting, so the state of
984 * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
985 * leftovers during boot.
986 */
987 void clear_local_APIC(void)
988 {
989 int maxlvt;
990 u32 v;
991
992 /* APIC hasn't been mapped yet */
993 if (!x2apic_mode && !apic_phys)
994 return;
995
996 maxlvt = lapic_get_maxlvt();
997 /*
998 * Masking an LVT entry can trigger a local APIC error
999 * if the vector is zero. Mask LVTERR first to prevent this.
1000 */
1001 if (maxlvt >= 3) {
1002 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
1003 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
1004 }
1005 /*
1006 * Careful: we have to set masks only first to deassert
1007 * any level-triggered sources.
1008 */
1009 v = apic_read(APIC_LVTT);
1010 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
1011 v = apic_read(APIC_LVT0);
1012 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1013 v = apic_read(APIC_LVT1);
1014 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1015 if (maxlvt >= 4) {
1016 v = apic_read(APIC_LVTPC);
1017 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
1018 }
1019
1020 /* lets not touch this if we didn't frob it */
1021 #ifdef CONFIG_X86_THERMAL_VECTOR
1022 if (maxlvt >= 5) {
1023 v = apic_read(APIC_LVTTHMR);
1024 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
1025 }
1026 #endif
1027 #ifdef CONFIG_X86_MCE_INTEL
1028 if (maxlvt >= 6) {
1029 v = apic_read(APIC_LVTCMCI);
1030 if (!(v & APIC_LVT_MASKED))
1031 apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
1032 }
1033 #endif
1034
1035 /*
1036 * Clean APIC state for other OSs:
1037 */
1038 apic_write(APIC_LVTT, APIC_LVT_MASKED);
1039 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1040 apic_write(APIC_LVT1, APIC_LVT_MASKED);
1041 if (maxlvt >= 3)
1042 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
1043 if (maxlvt >= 4)
1044 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
1045
1046 /* Integrated APIC (!82489DX) ? */
1047 if (lapic_is_integrated()) {
1048 if (maxlvt > 3)
1049 /* Clear ESR due to Pentium errata 3AP and 11AP */
1050 apic_write(APIC_ESR, 0);
1051 apic_read(APIC_ESR);
1052 }
1053 }
1054
1055 /**
1056 * disable_local_APIC - clear and disable the local APIC
1057 */
1058 void disable_local_APIC(void)
1059 {
1060 unsigned int value;
1061
1062 /* APIC hasn't been mapped yet */
1063 if (!x2apic_mode && !apic_phys)
1064 return;
1065
1066 clear_local_APIC();
1067
1068 /*
1069 * Disable APIC (implies clearing of registers
1070 * for 82489DX!).
1071 */
1072 value = apic_read(APIC_SPIV);
1073 value &= ~APIC_SPIV_APIC_ENABLED;
1074 apic_write(APIC_SPIV, value);
1075
1076 #ifdef CONFIG_X86_32
1077 /*
1078 * When LAPIC was disabled by the BIOS and enabled by the kernel,
1079 * restore the disabled state.
1080 */
1081 if (enabled_via_apicbase) {
1082 unsigned int l, h;
1083
1084 rdmsr(MSR_IA32_APICBASE, l, h);
1085 l &= ~MSR_IA32_APICBASE_ENABLE;
1086 wrmsr(MSR_IA32_APICBASE, l, h);
1087 }
1088 #endif
1089 }
1090
1091 /*
1092 * If Linux enabled the LAPIC against the BIOS default disable it down before
1093 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
1094 * not power-off. Additionally clear all LVT entries before disable_local_APIC
1095 * for the case where Linux didn't enable the LAPIC.
1096 */
1097 void lapic_shutdown(void)
1098 {
1099 unsigned long flags;
1100
1101 if (!cpu_has_apic && !apic_from_smp_config())
1102 return;
1103
1104 local_irq_save(flags);
1105
1106 #ifdef CONFIG_X86_32
1107 if (!enabled_via_apicbase)
1108 clear_local_APIC();
1109 else
1110 #endif
1111 disable_local_APIC();
1112
1113
1114 local_irq_restore(flags);
1115 }
1116
1117 /*
1118 * This is to verify that we're looking at a real local APIC.
1119 * Check these against your board if the CPUs aren't getting
1120 * started for no apparent reason.
1121 */
1122 int __init verify_local_APIC(void)
1123 {
1124 unsigned int reg0, reg1;
1125
1126 /*
1127 * The version register is read-only in a real APIC.
1128 */
1129 reg0 = apic_read(APIC_LVR);
1130 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
1131 apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
1132 reg1 = apic_read(APIC_LVR);
1133 apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
1134
1135 /*
1136 * The two version reads above should print the same
1137 * numbers. If the second one is different, then we
1138 * poke at a non-APIC.
1139 */
1140 if (reg1 != reg0)
1141 return 0;
1142
1143 /*
1144 * Check if the version looks reasonably.
1145 */
1146 reg1 = GET_APIC_VERSION(reg0);
1147 if (reg1 == 0x00 || reg1 == 0xff)
1148 return 0;
1149 reg1 = lapic_get_maxlvt();
1150 if (reg1 < 0x02 || reg1 == 0xff)
1151 return 0;
1152
1153 /*
1154 * The ID register is read/write in a real APIC.
1155 */
1156 reg0 = apic_read(APIC_ID);
1157 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
1158 apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
1159 reg1 = apic_read(APIC_ID);
1160 apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
1161 apic_write(APIC_ID, reg0);
1162 if (reg1 != (reg0 ^ apic->apic_id_mask))
1163 return 0;
1164
1165 /*
1166 * The next two are just to see if we have sane values.
1167 * They're only really relevant if we're in Virtual Wire
1168 * compatibility mode, but most boxes are anymore.
1169 */
1170 reg0 = apic_read(APIC_LVT0);
1171 apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
1172 reg1 = apic_read(APIC_LVT1);
1173 apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
1174
1175 return 1;
1176 }
1177
1178 /**
1179 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
1180 */
1181 void __init sync_Arb_IDs(void)
1182 {
1183 /*
1184 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
1185 * needed on AMD.
1186 */
1187 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1188 return;
1189
1190 /*
1191 * Wait for idle.
1192 */
1193 apic_wait_icr_idle();
1194
1195 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1196 apic_write(APIC_ICR, APIC_DEST_ALLINC |
1197 APIC_INT_LEVELTRIG | APIC_DM_INIT);
1198 }
1199
1200 /*
1201 * An initial setup of the virtual wire mode.
1202 */
1203 void __init init_bsp_APIC(void)
1204 {
1205 unsigned int value;
1206
1207 /*
1208 * Don't do the setup now if we have a SMP BIOS as the
1209 * through-I/O-APIC virtual wire mode might be active.
1210 */
1211 if (smp_found_config || !cpu_has_apic)
1212 return;
1213
1214 /*
1215 * Do not trust the local APIC being empty at bootup.
1216 */
1217 clear_local_APIC();
1218
1219 /*
1220 * Enable APIC.
1221 */
1222 value = apic_read(APIC_SPIV);
1223 value &= ~APIC_VECTOR_MASK;
1224 value |= APIC_SPIV_APIC_ENABLED;
1225
1226 #ifdef CONFIG_X86_32
1227 /* This bit is reserved on P4/Xeon and should be cleared */
1228 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1229 (boot_cpu_data.x86 == 15))
1230 value &= ~APIC_SPIV_FOCUS_DISABLED;
1231 else
1232 #endif
1233 value |= APIC_SPIV_FOCUS_DISABLED;
1234 value |= SPURIOUS_APIC_VECTOR;
1235 apic_write(APIC_SPIV, value);
1236
1237 /*
1238 * Set up the virtual wire mode.
1239 */
1240 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1241 value = APIC_DM_NMI;
1242 if (!lapic_is_integrated()) /* 82489DX */
1243 value |= APIC_LVT_LEVEL_TRIGGER;
1244 apic_write(APIC_LVT1, value);
1245 }
1246
1247 static void lapic_setup_esr(void)
1248 {
1249 unsigned int oldvalue, value, maxlvt;
1250
1251 if (!lapic_is_integrated()) {
1252 pr_info("No ESR for 82489DX.\n");
1253 return;
1254 }
1255
1256 if (apic->disable_esr) {
1257 /*
1258 * Something untraceable is creating bad interrupts on
1259 * secondary quads ... for the moment, just leave the
1260 * ESR disabled - we can't do anything useful with the
1261 * errors anyway - mbligh
1262 */
1263 pr_info("Leaving ESR disabled.\n");
1264 return;
1265 }
1266
1267 maxlvt = lapic_get_maxlvt();
1268 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1269 apic_write(APIC_ESR, 0);
1270 oldvalue = apic_read(APIC_ESR);
1271
1272 /* enables sending errors */
1273 value = ERROR_APIC_VECTOR;
1274 apic_write(APIC_LVTERR, value);
1275
1276 /*
1277 * spec says clear errors after enabling vector.
1278 */
1279 if (maxlvt > 3)
1280 apic_write(APIC_ESR, 0);
1281 value = apic_read(APIC_ESR);
1282 if (value != oldvalue)
1283 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1284 "vector: 0x%08x after: 0x%08x\n",
1285 oldvalue, value);
1286 }
1287
1288 /**
1289 * setup_local_APIC - setup the local APIC
1290 *
1291 * Used to setup local APIC while initializing BSP or bringin up APs.
1292 * Always called with preemption disabled.
1293 */
1294 void setup_local_APIC(void)
1295 {
1296 int cpu = smp_processor_id();
1297 unsigned int value, queued;
1298 int i, j, acked = 0;
1299 unsigned long long tsc = 0, ntsc;
1300 long long max_loops = cpu_khz;
1301
1302 if (cpu_has_tsc)
1303 rdtscll(tsc);
1304
1305 if (disable_apic) {
1306 disable_ioapic_support();
1307 return;
1308 }
1309
1310 #ifdef CONFIG_X86_32
1311 /* Pound the ESR really hard over the head with a big hammer - mbligh */
1312 if (lapic_is_integrated() && apic->disable_esr) {
1313 apic_write(APIC_ESR, 0);
1314 apic_write(APIC_ESR, 0);
1315 apic_write(APIC_ESR, 0);
1316 apic_write(APIC_ESR, 0);
1317 }
1318 #endif
1319 perf_events_lapic_init();
1320
1321 /*
1322 * Double-check whether this APIC is really registered.
1323 * This is meaningless in clustered apic mode, so we skip it.
1324 */
1325 BUG_ON(!apic->apic_id_registered());
1326
1327 /*
1328 * Intel recommends to set DFR, LDR and TPR before enabling
1329 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
1330 * document number 292116). So here it goes...
1331 */
1332 apic->init_apic_ldr();
1333
1334 #ifdef CONFIG_X86_32
1335 /*
1336 * APIC LDR is initialized. If logical_apicid mapping was
1337 * initialized during get_smp_config(), make sure it matches the
1338 * actual value.
1339 */
1340 i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1341 WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
1342 /* always use the value from LDR */
1343 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
1344 logical_smp_processor_id();
1345
1346 /*
1347 * Some NUMA implementations (NUMAQ) don't initialize apicid to
1348 * node mapping during NUMA init. Now that logical apicid is
1349 * guaranteed to be known, give it another chance. This is already
1350 * a bit too late - percpu allocation has already happened without
1351 * proper NUMA affinity.
1352 */
1353 if (apic->x86_32_numa_cpu_node)
1354 set_apicid_to_node(early_per_cpu(x86_cpu_to_apicid, cpu),
1355 apic->x86_32_numa_cpu_node(cpu));
1356 #endif
1357
1358 /*
1359 * Set Task Priority to 'accept all'. We never change this
1360 * later on.
1361 */
1362 value = apic_read(APIC_TASKPRI);
1363 value &= ~APIC_TPRI_MASK;
1364 apic_write(APIC_TASKPRI, value);
1365
1366 /*
1367 * After a crash, we no longer service the interrupts and a pending
1368 * interrupt from previous kernel might still have ISR bit set.
1369 *
1370 * Most probably by now CPU has serviced that pending interrupt and
1371 * it might not have done the ack_APIC_irq() because it thought,
1372 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
1373 * does not clear the ISR bit and cpu thinks it has already serivced
1374 * the interrupt. Hence a vector might get locked. It was noticed
1375 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
1376 */
1377 do {
1378 queued = 0;
1379 for (i = APIC_ISR_NR - 1; i >= 0; i--)
1380 queued |= apic_read(APIC_IRR + i*0x10);
1381
1382 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1383 value = apic_read(APIC_ISR + i*0x10);
1384 for (j = 31; j >= 0; j--) {
1385 if (value & (1<<j)) {
1386 ack_APIC_irq();
1387 acked++;
1388 }
1389 }
1390 }
1391 if (acked > 256) {
1392 printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
1393 acked);
1394 break;
1395 }
1396 if (queued) {
1397 if (cpu_has_tsc) {
1398 rdtscll(ntsc);
1399 max_loops = (cpu_khz << 10) - (ntsc - tsc);
1400 } else
1401 max_loops--;
1402 }
1403 } while (queued && max_loops > 0);
1404 WARN_ON(max_loops <= 0);
1405
1406 /*
1407 * Now that we are all set up, enable the APIC
1408 */
1409 value = apic_read(APIC_SPIV);
1410 value &= ~APIC_VECTOR_MASK;
1411 /*
1412 * Enable APIC
1413 */
1414 value |= APIC_SPIV_APIC_ENABLED;
1415
1416 #ifdef CONFIG_X86_32
1417 /*
1418 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1419 * certain networking cards. If high frequency interrupts are
1420 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1421 * entry is masked/unmasked at a high rate as well then sooner or
1422 * later IOAPIC line gets 'stuck', no more interrupts are received
1423 * from the device. If focus CPU is disabled then the hang goes
1424 * away, oh well :-(
1425 *
1426 * [ This bug can be reproduced easily with a level-triggered
1427 * PCI Ne2000 networking cards and PII/PIII processors, dual
1428 * BX chipset. ]
1429 */
1430 /*
1431 * Actually disabling the focus CPU check just makes the hang less
1432 * frequent as it makes the interrupt distributon model be more
1433 * like LRU than MRU (the short-term load is more even across CPUs).
1434 * See also the comment in end_level_ioapic_irq(). --macro
1435 */
1436
1437 /*
1438 * - enable focus processor (bit==0)
1439 * - 64bit mode always use processor focus
1440 * so no need to set it
1441 */
1442 value &= ~APIC_SPIV_FOCUS_DISABLED;
1443 #endif
1444
1445 /*
1446 * Set spurious IRQ vector
1447 */
1448 value |= SPURIOUS_APIC_VECTOR;
1449 apic_write(APIC_SPIV, value);
1450
1451 /*
1452 * Set up LVT0, LVT1:
1453 *
1454 * set up through-local-APIC on the BP's LINT0. This is not
1455 * strictly necessary in pure symmetric-IO mode, but sometimes
1456 * we delegate interrupts to the 8259A.
1457 */
1458 /*
1459 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1460 */
1461 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1462 if (!cpu && (pic_mode || !value)) {
1463 value = APIC_DM_EXTINT;
1464 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1465 } else {
1466 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1467 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1468 }
1469 apic_write(APIC_LVT0, value);
1470
1471 /*
1472 * only the BP should see the LINT1 NMI signal, obviously.
1473 */
1474 if (!cpu)
1475 value = APIC_DM_NMI;
1476 else
1477 value = APIC_DM_NMI | APIC_LVT_MASKED;
1478 if (!lapic_is_integrated()) /* 82489DX */
1479 value |= APIC_LVT_LEVEL_TRIGGER;
1480 apic_write(APIC_LVT1, value);
1481
1482 #ifdef CONFIG_X86_MCE_INTEL
1483 /* Recheck CMCI information after local APIC is up on CPU #0 */
1484 if (!cpu)
1485 cmci_recheck();
1486 #endif
1487 }
1488
1489 void end_local_APIC_setup(void)
1490 {
1491 lapic_setup_esr();
1492
1493 #ifdef CONFIG_X86_32
1494 {
1495 unsigned int value;
1496 /* Disable the local apic timer */
1497 value = apic_read(APIC_LVTT);
1498 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1499 apic_write(APIC_LVTT, value);
1500 }
1501 #endif
1502
1503 apic_pm_activate();
1504 }
1505
1506 void __init bsp_end_local_APIC_setup(void)
1507 {
1508 end_local_APIC_setup();
1509
1510 /*
1511 * Now that local APIC setup is completed for BP, configure the fault
1512 * handling for interrupt remapping.
1513 */
1514 irq_remap_enable_fault_handling();
1515
1516 }
1517
1518 #ifdef CONFIG_X86_X2APIC
1519 /*
1520 * Need to disable xapic and x2apic at the same time and then enable xapic mode
1521 */
1522 static inline void __disable_x2apic(u64 msr)
1523 {
1524 wrmsrl(MSR_IA32_APICBASE,
1525 msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1526 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1527 }
1528
1529 static __init void disable_x2apic(void)
1530 {
1531 u64 msr;
1532
1533 if (!cpu_has_x2apic)
1534 return;
1535
1536 rdmsrl(MSR_IA32_APICBASE, msr);
1537 if (msr & X2APIC_ENABLE) {
1538 u32 x2apic_id = read_apic_id();
1539
1540 if (x2apic_id >= 255)
1541 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1542
1543 pr_info("Disabling x2apic\n");
1544 __disable_x2apic(msr);
1545
1546 if (nox2apic) {
1547 clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
1548 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1549 }
1550
1551 x2apic_disabled = 1;
1552 x2apic_mode = 0;
1553
1554 register_lapic_address(mp_lapic_addr);
1555 }
1556 }
1557
1558 void check_x2apic(void)
1559 {
1560 if (x2apic_enabled()) {
1561 pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
1562 x2apic_preenabled = x2apic_mode = 1;
1563 }
1564 }
1565
1566 void enable_x2apic(void)
1567 {
1568 u64 msr;
1569
1570 rdmsrl(MSR_IA32_APICBASE, msr);
1571 if (x2apic_disabled) {
1572 __disable_x2apic(msr);
1573 return;
1574 }
1575
1576 if (!x2apic_mode)
1577 return;
1578
1579 if (!(msr & X2APIC_ENABLE)) {
1580 printk_once(KERN_INFO "Enabling x2apic\n");
1581 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1582 }
1583 }
1584 #endif /* CONFIG_X86_X2APIC */
1585
1586 int __init enable_IR(void)
1587 {
1588 #ifdef CONFIG_IRQ_REMAP
1589 if (!irq_remapping_supported()) {
1590 pr_debug("intr-remapping not supported\n");
1591 return -1;
1592 }
1593
1594 if (!x2apic_preenabled && skip_ioapic_setup) {
1595 pr_info("Skipped enabling intr-remap because of skipping "
1596 "io-apic setup\n");
1597 return -1;
1598 }
1599
1600 return irq_remapping_enable();
1601 #endif
1602 return -1;
1603 }
1604
1605 void __init enable_IR_x2apic(void)
1606 {
1607 unsigned long flags;
1608 int ret, x2apic_enabled = 0;
1609 int hardware_init_ret;
1610
1611 /* Make sure irq_remap_ops are initialized */
1612 setup_irq_remapping_ops();
1613
1614 hardware_init_ret = irq_remapping_prepare();
1615 if (hardware_init_ret && !x2apic_supported())
1616 return;
1617
1618 ret = save_ioapic_entries();
1619 if (ret) {
1620 pr_info("Saving IO-APIC state failed: %d\n", ret);
1621 return;
1622 }
1623
1624 local_irq_save(flags);
1625 legacy_pic->mask_all();
1626 mask_ioapic_entries();
1627
1628 if (x2apic_preenabled && nox2apic)
1629 disable_x2apic();
1630
1631 if (hardware_init_ret)
1632 ret = -1;
1633 else
1634 ret = enable_IR();
1635
1636 if (!x2apic_supported())
1637 goto skip_x2apic;
1638
1639 if (ret < 0) {
1640 /* IR is required if there is APIC ID > 255 even when running
1641 * under KVM
1642 */
1643 if (max_physical_apicid > 255 ||
1644 !hypervisor_x2apic_available()) {
1645 if (x2apic_preenabled)
1646 disable_x2apic();
1647 goto skip_x2apic;
1648 }
1649 /*
1650 * without IR all CPUs can be addressed by IOAPIC/MSI
1651 * only in physical mode
1652 */
1653 x2apic_force_phys();
1654 }
1655
1656 if (ret == IRQ_REMAP_XAPIC_MODE) {
1657 pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
1658 goto skip_x2apic;
1659 }
1660
1661 x2apic_enabled = 1;
1662
1663 if (x2apic_supported() && !x2apic_mode) {
1664 x2apic_mode = 1;
1665 enable_x2apic();
1666 pr_info("Enabled x2apic\n");
1667 }
1668
1669 skip_x2apic:
1670 if (ret < 0) /* IR enabling failed */
1671 restore_ioapic_entries();
1672 legacy_pic->restore_mask();
1673 local_irq_restore(flags);
1674 }
1675
1676 #ifdef CONFIG_X86_64
1677 /*
1678 * Detect and enable local APICs on non-SMP boards.
1679 * Original code written by Keir Fraser.
1680 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1681 * not correctly set up (usually the APIC timer won't work etc.)
1682 */
1683 static int __init detect_init_APIC(void)
1684 {
1685 if (!cpu_has_apic) {
1686 pr_info("No local APIC present\n");
1687 return -1;
1688 }
1689
1690 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1691 return 0;
1692 }
1693 #else
1694
1695 static int __init apic_verify(void)
1696 {
1697 u32 features, h, l;
1698
1699 /*
1700 * The APIC feature bit should now be enabled
1701 * in `cpuid'
1702 */
1703 features = cpuid_edx(1);
1704 if (!(features & (1 << X86_FEATURE_APIC))) {
1705 pr_warning("Could not enable APIC!\n");
1706 return -1;
1707 }
1708 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1709 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1710
1711 /* The BIOS may have set up the APIC at some other address */
1712 if (boot_cpu_data.x86 >= 6) {
1713 rdmsr(MSR_IA32_APICBASE, l, h);
1714 if (l & MSR_IA32_APICBASE_ENABLE)
1715 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1716 }
1717
1718 pr_info("Found and enabled local APIC!\n");
1719 return 0;
1720 }
1721
1722 int __init apic_force_enable(unsigned long addr)
1723 {
1724 u32 h, l;
1725
1726 if (disable_apic)
1727 return -1;
1728
1729 /*
1730 * Some BIOSes disable the local APIC in the APIC_BASE
1731 * MSR. This can only be done in software for Intel P6 or later
1732 * and AMD K7 (Model > 1) or later.
1733 */
1734 if (boot_cpu_data.x86 >= 6) {
1735 rdmsr(MSR_IA32_APICBASE, l, h);
1736 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1737 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1738 l &= ~MSR_IA32_APICBASE_BASE;
1739 l |= MSR_IA32_APICBASE_ENABLE | addr;
1740 wrmsr(MSR_IA32_APICBASE, l, h);
1741 enabled_via_apicbase = 1;
1742 }
1743 }
1744 return apic_verify();
1745 }
1746
1747 /*
1748 * Detect and initialize APIC
1749 */
1750 static int __init detect_init_APIC(void)
1751 {
1752 /* Disabled by kernel option? */
1753 if (disable_apic)
1754 return -1;
1755
1756 switch (boot_cpu_data.x86_vendor) {
1757 case X86_VENDOR_AMD:
1758 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1759 (boot_cpu_data.x86 >= 15))
1760 break;
1761 goto no_apic;
1762 case X86_VENDOR_INTEL:
1763 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1764 (boot_cpu_data.x86 == 5 && cpu_has_apic))
1765 break;
1766 goto no_apic;
1767 default:
1768 goto no_apic;
1769 }
1770
1771 if (!cpu_has_apic) {
1772 /*
1773 * Over-ride BIOS and try to enable the local APIC only if
1774 * "lapic" specified.
1775 */
1776 if (!force_enable_local_apic) {
1777 pr_info("Local APIC disabled by BIOS -- "
1778 "you can enable it with \"lapic\"\n");
1779 return -1;
1780 }
1781 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
1782 return -1;
1783 } else {
1784 if (apic_verify())
1785 return -1;
1786 }
1787
1788 apic_pm_activate();
1789
1790 return 0;
1791
1792 no_apic:
1793 pr_info("No local APIC present or hardware disabled\n");
1794 return -1;
1795 }
1796 #endif
1797
1798 /**
1799 * init_apic_mappings - initialize APIC mappings
1800 */
1801 void __init init_apic_mappings(void)
1802 {
1803 unsigned int new_apicid;
1804
1805 if (x2apic_mode) {
1806 boot_cpu_physical_apicid = read_apic_id();
1807 return;
1808 }
1809
1810 /* If no local APIC can be found return early */
1811 if (!smp_found_config && detect_init_APIC()) {
1812 /* lets NOP'ify apic operations */
1813 pr_info("APIC: disable apic facility\n");
1814 apic_disable();
1815 } else {
1816 apic_phys = mp_lapic_addr;
1817
1818 /*
1819 * acpi lapic path already maps that address in
1820 * acpi_register_lapic_address()
1821 */
1822 if (!acpi_lapic && !smp_found_config)
1823 register_lapic_address(apic_phys);
1824 }
1825
1826 /*
1827 * Fetch the APIC ID of the BSP in case we have a
1828 * default configuration (or the MP table is broken).
1829 */
1830 new_apicid = read_apic_id();
1831 if (boot_cpu_physical_apicid != new_apicid) {
1832 boot_cpu_physical_apicid = new_apicid;
1833 /*
1834 * yeah -- we lie about apic_version
1835 * in case if apic was disabled via boot option
1836 * but it's not a problem for SMP compiled kernel
1837 * since smp_sanity_check is prepared for such a case
1838 * and disable smp mode
1839 */
1840 apic_version[new_apicid] =
1841 GET_APIC_VERSION(apic_read(APIC_LVR));
1842 }
1843 }
1844
1845 void __init register_lapic_address(unsigned long address)
1846 {
1847 mp_lapic_addr = address;
1848
1849 if (!x2apic_mode) {
1850 set_fixmap_nocache(FIX_APIC_BASE, address);
1851 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
1852 APIC_BASE, mp_lapic_addr);
1853 }
1854 if (boot_cpu_physical_apicid == -1U) {
1855 boot_cpu_physical_apicid = read_apic_id();
1856 apic_version[boot_cpu_physical_apicid] =
1857 GET_APIC_VERSION(apic_read(APIC_LVR));
1858 }
1859 }
1860
1861 /*
1862 * This initializes the IO-APIC and APIC hardware if this is
1863 * a UP kernel.
1864 */
1865 int apic_version[MAX_LOCAL_APIC];
1866
1867 int __init APIC_init_uniprocessor(void)
1868 {
1869 if (disable_apic) {
1870 pr_info("Apic disabled\n");
1871 return -1;
1872 }
1873 #ifdef CONFIG_X86_64
1874 if (!cpu_has_apic) {
1875 disable_apic = 1;
1876 pr_info("Apic disabled by BIOS\n");
1877 return -1;
1878 }
1879 #else
1880 if (!smp_found_config && !cpu_has_apic)
1881 return -1;
1882
1883 /*
1884 * Complain if the BIOS pretends there is one.
1885 */
1886 if (!cpu_has_apic &&
1887 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1888 pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
1889 boot_cpu_physical_apicid);
1890 return -1;
1891 }
1892 #endif
1893
1894 default_setup_apic_routing();
1895
1896 verify_local_APIC();
1897 connect_bsp_APIC();
1898
1899 #ifdef CONFIG_X86_64
1900 apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
1901 #else
1902 /*
1903 * Hack: In case of kdump, after a crash, kernel might be booting
1904 * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
1905 * might be zero if read from MP tables. Get it from LAPIC.
1906 */
1907 # ifdef CONFIG_CRASH_DUMP
1908 boot_cpu_physical_apicid = read_apic_id();
1909 # endif
1910 #endif
1911 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1912 setup_local_APIC();
1913
1914 #ifdef CONFIG_X86_IO_APIC
1915 /*
1916 * Now enable IO-APICs, actually call clear_IO_APIC
1917 * We need clear_IO_APIC before enabling error vector
1918 */
1919 if (!skip_ioapic_setup && nr_ioapics)
1920 enable_IO_APIC();
1921 #endif
1922
1923 bsp_end_local_APIC_setup();
1924
1925 #ifdef CONFIG_X86_IO_APIC
1926 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1927 setup_IO_APIC();
1928 else {
1929 nr_ioapics = 0;
1930 }
1931 #endif
1932
1933 x86_init.timers.setup_percpu_clockev();
1934 return 0;
1935 }
1936
1937 /*
1938 * Local APIC interrupts
1939 */
1940
1941 /*
1942 * This interrupt should _never_ happen with our APIC/SMP architecture
1943 */
1944 static inline void __smp_spurious_interrupt(void)
1945 {
1946 u32 v;
1947
1948 /*
1949 * Check if this really is a spurious interrupt and ACK it
1950 * if it is a vectored one. Just in case...
1951 * Spurious interrupts should not be ACKed.
1952 */
1953 v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
1954 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
1955 ack_APIC_irq();
1956
1957 inc_irq_stat(irq_spurious_count);
1958
1959 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1960 pr_info("spurious APIC interrupt on CPU#%d, "
1961 "should never happen.\n", smp_processor_id());
1962 }
1963
1964 __visible void smp_spurious_interrupt(struct pt_regs *regs)
1965 {
1966 entering_irq();
1967 __smp_spurious_interrupt();
1968 exiting_irq();
1969 }
1970
1971 __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
1972 {
1973 entering_irq();
1974 trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
1975 __smp_spurious_interrupt();
1976 trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
1977 exiting_irq();
1978 }
1979
1980 /*
1981 * This interrupt should never happen with our APIC/SMP architecture
1982 */
1983 static inline void __smp_error_interrupt(struct pt_regs *regs)
1984 {
1985 u32 v;
1986 u32 i = 0;
1987 static const char * const error_interrupt_reason[] = {
1988 "Send CS error", /* APIC Error Bit 0 */
1989 "Receive CS error", /* APIC Error Bit 1 */
1990 "Send accept error", /* APIC Error Bit 2 */
1991 "Receive accept error", /* APIC Error Bit 3 */
1992 "Redirectable IPI", /* APIC Error Bit 4 */
1993 "Send illegal vector", /* APIC Error Bit 5 */
1994 "Received illegal vector", /* APIC Error Bit 6 */
1995 "Illegal register address", /* APIC Error Bit 7 */
1996 };
1997
1998 /* First tickle the hardware, only then report what went on. -- REW */
1999 if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. */
2000 apic_write(APIC_ESR, 0);
2001 v = apic_read(APIC_ESR);
2002 ack_APIC_irq();
2003 atomic_inc(&irq_err_count);
2004
2005 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
2006 smp_processor_id(), v);
2007
2008 v &= 0xff;
2009 while (v) {
2010 if (v & 0x1)
2011 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2012 i++;
2013 v >>= 1;
2014 }
2015
2016 apic_printk(APIC_DEBUG, KERN_CONT "\n");
2017
2018 }
2019
2020 __visible void smp_error_interrupt(struct pt_regs *regs)
2021 {
2022 entering_irq();
2023 __smp_error_interrupt(regs);
2024 exiting_irq();
2025 }
2026
2027 __visible void smp_trace_error_interrupt(struct pt_regs *regs)
2028 {
2029 entering_irq();
2030 trace_error_apic_entry(ERROR_APIC_VECTOR);
2031 __smp_error_interrupt(regs);
2032 trace_error_apic_exit(ERROR_APIC_VECTOR);
2033 exiting_irq();
2034 }
2035
2036 /**
2037 * connect_bsp_APIC - attach the APIC to the interrupt system
2038 */
2039 void __init connect_bsp_APIC(void)
2040 {
2041 #ifdef CONFIG_X86_32
2042 if (pic_mode) {
2043 /*
2044 * Do not trust the local APIC being empty at bootup.
2045 */
2046 clear_local_APIC();
2047 /*
2048 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
2049 * local APIC to INT and NMI lines.
2050 */
2051 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2052 "enabling APIC mode.\n");
2053 imcr_pic_to_apic();
2054 }
2055 #endif
2056 if (apic->enable_apic_mode)
2057 apic->enable_apic_mode();
2058 }
2059
2060 /**
2061 * disconnect_bsp_APIC - detach the APIC from the interrupt system
2062 * @virt_wire_setup: indicates, whether virtual wire mode is selected
2063 *
2064 * Virtual wire mode is necessary to deliver legacy interrupts even when the
2065 * APIC is disabled.
2066 */
2067 void disconnect_bsp_APIC(int virt_wire_setup)
2068 {
2069 unsigned int value;
2070
2071 #ifdef CONFIG_X86_32
2072 if (pic_mode) {
2073 /*
2074 * Put the board back into PIC mode (has an effect only on
2075 * certain older boards). Note that APIC interrupts, including
2076 * IPIs, won't work beyond this point! The only exception are
2077 * INIT IPIs.
2078 */
2079 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2080 "entering PIC mode.\n");
2081 imcr_apic_to_pic();
2082 return;
2083 }
2084 #endif
2085
2086 /* Go back to Virtual Wire compatibility mode */
2087
2088 /* For the spurious interrupt use vector F, and enable it */
2089 value = apic_read(APIC_SPIV);
2090 value &= ~APIC_VECTOR_MASK;
2091 value |= APIC_SPIV_APIC_ENABLED;
2092 value |= 0xf;
2093 apic_write(APIC_SPIV, value);
2094
2095 if (!virt_wire_setup) {
2096 /*
2097 * For LVT0 make it edge triggered, active high,
2098 * external and enabled
2099 */
2100 value = apic_read(APIC_LVT0);
2101 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2102 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2103 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2104 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2105 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2106 apic_write(APIC_LVT0, value);
2107 } else {
2108 /* Disable LVT0 */
2109 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2110 }
2111
2112 /*
2113 * For LVT1 make it edge triggered, active high,
2114 * nmi and enabled
2115 */
2116 value = apic_read(APIC_LVT1);
2117 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2118 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2119 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2120 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2121 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2122 apic_write(APIC_LVT1, value);
2123 }
2124
2125 int generic_processor_info(int apicid, int version)
2126 {
2127 int cpu, max = nr_cpu_ids;
2128 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2129 phys_cpu_present_map);
2130
2131 /*
2132 * boot_cpu_physical_apicid is designed to have the apicid
2133 * returned by read_apic_id(), i.e, the apicid of the
2134 * currently booting-up processor. However, on some platforms,
2135 * it is temporarily modified by the apicid reported as BSP
2136 * through MP table. Concretely:
2137 *
2138 * - arch/x86/kernel/mpparse.c: MP_processor_info()
2139 * - arch/x86/mm/amdtopology.c: amd_numa_init()
2140 *
2141 * This function is executed with the modified
2142 * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
2143 * parameter doesn't work to disable APs on kdump 2nd kernel.
2144 *
2145 * Since fixing handling of boot_cpu_physical_apicid requires
2146 * another discussion and tests on each platform, we leave it
2147 * for now and here we use read_apic_id() directly in this
2148 * function, generic_processor_info().
2149 */
2150 if (disabled_cpu_apicid != BAD_APICID &&
2151 disabled_cpu_apicid != read_apic_id() &&
2152 disabled_cpu_apicid == apicid) {
2153 int thiscpu = num_processors + disabled_cpus;
2154
2155 pr_warning("APIC: Disabling requested cpu."
2156 " Processor %d/0x%x ignored.\n",
2157 thiscpu, apicid);
2158
2159 disabled_cpus++;
2160 return -ENODEV;
2161 }
2162
2163 /*
2164 * If boot cpu has not been detected yet, then only allow upto
2165 * nr_cpu_ids - 1 processors and keep one slot free for boot cpu
2166 */
2167 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2168 apicid != boot_cpu_physical_apicid) {
2169 int thiscpu = max + disabled_cpus - 1;
2170
2171 pr_warning(
2172 "ACPI: NR_CPUS/possible_cpus limit of %i almost"
2173 " reached. Keeping one slot for boot cpu."
2174 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2175
2176 disabled_cpus++;
2177 return -ENODEV;
2178 }
2179
2180 if (num_processors >= nr_cpu_ids) {
2181 int thiscpu = max + disabled_cpus;
2182
2183 pr_warning(
2184 "ACPI: NR_CPUS/possible_cpus limit of %i reached."
2185 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2186
2187 disabled_cpus++;
2188 return -EINVAL;
2189 }
2190
2191 num_processors++;
2192 if (apicid == boot_cpu_physical_apicid) {
2193 /*
2194 * x86_bios_cpu_apicid is required to have processors listed
2195 * in same order as logical cpu numbers. Hence the first
2196 * entry is BSP, and so on.
2197 * boot_cpu_init() already hold bit 0 in cpu_present_mask
2198 * for BSP.
2199 */
2200 cpu = 0;
2201 } else
2202 cpu = cpumask_next_zero(-1, cpu_present_mask);
2203
2204 /*
2205 * Validate version
2206 */
2207 if (version == 0x0) {
2208 pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2209 cpu, apicid);
2210 version = 0x10;
2211 }
2212 apic_version[apicid] = version;
2213
2214 if (version != apic_version[boot_cpu_physical_apicid]) {
2215 pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2216 apic_version[boot_cpu_physical_apicid], cpu, version);
2217 }
2218
2219 physid_set(apicid, phys_cpu_present_map);
2220 if (apicid > max_physical_apicid)
2221 max_physical_apicid = apicid;
2222
2223 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
2224 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
2225 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
2226 #endif
2227 #ifdef CONFIG_X86_32
2228 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
2229 apic->x86_32_early_logical_apicid(cpu);
2230 #endif
2231 set_cpu_possible(cpu, true);
2232 set_cpu_present(cpu, true);
2233
2234 return cpu;
2235 }
2236
2237 int hard_smp_processor_id(void)
2238 {
2239 return read_apic_id();
2240 }
2241
2242 void default_init_apic_ldr(void)
2243 {
2244 unsigned long val;
2245
2246 apic_write(APIC_DFR, APIC_DFR_VALUE);
2247 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
2248 val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
2249 apic_write(APIC_LDR, val);
2250 }
2251
2252 int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
2253 const struct cpumask *andmask,
2254 unsigned int *apicid)
2255 {
2256 unsigned int cpu;
2257
2258 for_each_cpu_and(cpu, cpumask, andmask) {
2259 if (cpumask_test_cpu(cpu, cpu_online_mask))
2260 break;
2261 }
2262
2263 if (likely(cpu < nr_cpu_ids)) {
2264 *apicid = per_cpu(x86_cpu_to_apicid, cpu);
2265 return 0;
2266 }
2267
2268 return -EINVAL;
2269 }
2270
2271 /*
2272 * Override the generic EOI implementation with an optimized version.
2273 * Only called during early boot when only one CPU is active and with
2274 * interrupts disabled, so we know this does not race with actual APIC driver
2275 * use.
2276 */
2277 void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2278 {
2279 struct apic **drv;
2280
2281 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2282 /* Should happen once for each apic */
2283 WARN_ON((*drv)->eoi_write == eoi_write);
2284 (*drv)->eoi_write = eoi_write;
2285 }
2286 }
2287
2288 /*
2289 * Power management
2290 */
2291 #ifdef CONFIG_PM
2292
2293 static struct {
2294 /*
2295 * 'active' is true if the local APIC was enabled by us and
2296 * not the BIOS; this signifies that we are also responsible
2297 * for disabling it before entering apm/acpi suspend
2298 */
2299 int active;
2300 /* r/w apic fields */
2301 unsigned int apic_id;
2302 unsigned int apic_taskpri;
2303 unsigned int apic_ldr;
2304 unsigned int apic_dfr;
2305 unsigned int apic_spiv;
2306 unsigned int apic_lvtt;
2307 unsigned int apic_lvtpc;
2308 unsigned int apic_lvt0;
2309 unsigned int apic_lvt1;
2310 unsigned int apic_lvterr;
2311 unsigned int apic_tmict;
2312 unsigned int apic_tdcr;
2313 unsigned int apic_thmr;
2314 } apic_pm_state;
2315
2316 static int lapic_suspend(void)
2317 {
2318 unsigned long flags;
2319 int maxlvt;
2320
2321 if (!apic_pm_state.active)
2322 return 0;
2323
2324 maxlvt = lapic_get_maxlvt();
2325
2326 apic_pm_state.apic_id = apic_read(APIC_ID);
2327 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
2328 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
2329 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
2330 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
2331 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
2332 if (maxlvt >= 4)
2333 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
2334 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
2335 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
2336 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
2337 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
2338 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
2339 #ifdef CONFIG_X86_THERMAL_VECTOR
2340 if (maxlvt >= 5)
2341 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
2342 #endif
2343
2344 local_irq_save(flags);
2345 disable_local_APIC();
2346
2347 irq_remapping_disable();
2348
2349 local_irq_restore(flags);
2350 return 0;
2351 }
2352
2353 static void lapic_resume(void)
2354 {
2355 unsigned int l, h;
2356 unsigned long flags;
2357 int maxlvt;
2358
2359 if (!apic_pm_state.active)
2360 return;
2361
2362 local_irq_save(flags);
2363
2364 /*
2365 * IO-APIC and PIC have their own resume routines.
2366 * We just mask them here to make sure the interrupt
2367 * subsystem is completely quiet while we enable x2apic
2368 * and interrupt-remapping.
2369 */
2370 mask_ioapic_entries();
2371 legacy_pic->mask_all();
2372
2373 if (x2apic_mode)
2374 enable_x2apic();
2375 else {
2376 /*
2377 * Make sure the APICBASE points to the right address
2378 *
2379 * FIXME! This will be wrong if we ever support suspend on
2380 * SMP! We'll need to do this as part of the CPU restore!
2381 */
2382 if (boot_cpu_data.x86 >= 6) {
2383 rdmsr(MSR_IA32_APICBASE, l, h);
2384 l &= ~MSR_IA32_APICBASE_BASE;
2385 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2386 wrmsr(MSR_IA32_APICBASE, l, h);
2387 }
2388 }
2389
2390 maxlvt = lapic_get_maxlvt();
2391 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2392 apic_write(APIC_ID, apic_pm_state.apic_id);
2393 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2394 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2395 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2396 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2397 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2398 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
2399 #if defined(CONFIG_X86_MCE_INTEL)
2400 if (maxlvt >= 5)
2401 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2402 #endif
2403 if (maxlvt >= 4)
2404 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2405 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2406 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2407 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2408 apic_write(APIC_ESR, 0);
2409 apic_read(APIC_ESR);
2410 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2411 apic_write(APIC_ESR, 0);
2412 apic_read(APIC_ESR);
2413
2414 irq_remapping_reenable(x2apic_mode);
2415
2416 local_irq_restore(flags);
2417 }
2418
2419 /*
2420 * This device has no shutdown method - fully functioning local APICs
2421 * are needed on every CPU up until machine_halt/restart/poweroff.
2422 */
2423
2424 static struct syscore_ops lapic_syscore_ops = {
2425 .resume = lapic_resume,
2426 .suspend = lapic_suspend,
2427 };
2428
2429 static void apic_pm_activate(void)
2430 {
2431 apic_pm_state.active = 1;
2432 }
2433
2434 static int __init init_lapic_sysfs(void)
2435 {
2436 /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
2437 if (cpu_has_apic)
2438 register_syscore_ops(&lapic_syscore_ops);
2439
2440 return 0;
2441 }
2442
2443 /* local apic needs to resume before other devices access its registers. */
2444 core_initcall(init_lapic_sysfs);
2445
2446 #else /* CONFIG_PM */
2447
2448 static void apic_pm_activate(void) { }
2449
2450 #endif /* CONFIG_PM */
2451
2452 #ifdef CONFIG_X86_64
2453
2454 static int apic_cluster_num(void)
2455 {
2456 int i, clusters, zeros;
2457 unsigned id;
2458 u16 *bios_cpu_apicid;
2459 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
2460
2461 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
2462 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
2463
2464 for (i = 0; i < nr_cpu_ids; i++) {
2465 /* are we being called early in kernel startup? */
2466 if (bios_cpu_apicid) {
2467 id = bios_cpu_apicid[i];
2468 } else if (i < nr_cpu_ids) {
2469 if (cpu_present(i))
2470 id = per_cpu(x86_bios_cpu_apicid, i);
2471 else
2472 continue;
2473 } else
2474 break;
2475
2476 if (id != BAD_APICID)
2477 __set_bit(APIC_CLUSTERID(id), clustermap);
2478 }
2479
2480 /* Problem: Partially populated chassis may not have CPUs in some of
2481 * the APIC clusters they have been allocated. Only present CPUs have
2482 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
2483 * Since clusters are allocated sequentially, count zeros only if
2484 * they are bounded by ones.
2485 */
2486 clusters = 0;
2487 zeros = 0;
2488 for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
2489 if (test_bit(i, clustermap)) {
2490 clusters += 1 + zeros;
2491 zeros = 0;
2492 } else
2493 ++zeros;
2494 }
2495
2496 return clusters;
2497 }
2498
2499 static int multi_checked;
2500 static int multi;
2501
2502 static int set_multi(const struct dmi_system_id *d)
2503 {
2504 if (multi)
2505 return 0;
2506 pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
2507 multi = 1;
2508 return 0;
2509 }
2510
2511 static const struct dmi_system_id multi_dmi_table[] = {
2512 {
2513 .callback = set_multi,
2514 .ident = "IBM System Summit2",
2515 .matches = {
2516 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2517 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2518 },
2519 },
2520 {}
2521 };
2522
2523 static void dmi_check_multi(void)
2524 {
2525 if (multi_checked)
2526 return;
2527
2528 dmi_check_system(multi_dmi_table);
2529 multi_checked = 1;
2530 }
2531
2532 /*
2533 * apic_is_clustered_box() -- Check if we can expect good TSC
2534 *
2535 * Thus far, the major user of this is IBM's Summit2 series:
2536 * Clustered boxes may have unsynced TSC problems if they are
2537 * multi-chassis.
2538 * Use DMI to check them
2539 */
2540 int apic_is_clustered_box(void)
2541 {
2542 dmi_check_multi();
2543 if (multi)
2544 return 1;
2545
2546 if (!is_vsmp_box())
2547 return 0;
2548
2549 /*
2550 * ScaleMP vSMPowered boxes have one cluster per board and TSCs are
2551 * not guaranteed to be synced between boards
2552 */
2553 if (apic_cluster_num() > 1)
2554 return 1;
2555
2556 return 0;
2557 }
2558 #endif
2559
2560 /*
2561 * APIC command line parameters
2562 */
2563 static int __init setup_disableapic(char *arg)
2564 {
2565 disable_apic = 1;
2566 setup_clear_cpu_cap(X86_FEATURE_APIC);
2567 return 0;
2568 }
2569 early_param("disableapic", setup_disableapic);
2570
2571 /* same as disableapic, for compatibility */
2572 static int __init setup_nolapic(char *arg)
2573 {
2574 return setup_disableapic(arg);
2575 }
2576 early_param("nolapic", setup_nolapic);
2577
2578 static int __init parse_lapic_timer_c2_ok(char *arg)
2579 {
2580 local_apic_timer_c2_ok = 1;
2581 return 0;
2582 }
2583 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2584
2585 static int __init parse_disable_apic_timer(char *arg)
2586 {
2587 disable_apic_timer = 1;
2588 return 0;
2589 }
2590 early_param("noapictimer", parse_disable_apic_timer);
2591
2592 static int __init parse_nolapic_timer(char *arg)
2593 {
2594 disable_apic_timer = 1;
2595 return 0;
2596 }
2597 early_param("nolapic_timer", parse_nolapic_timer);
2598
2599 static int __init apic_set_verbosity(char *arg)
2600 {
2601 if (!arg) {
2602 #ifdef CONFIG_X86_64
2603 skip_ioapic_setup = 0;
2604 return 0;
2605 #endif
2606 return -EINVAL;
2607 }
2608
2609 if (strcmp("debug", arg) == 0)
2610 apic_verbosity = APIC_DEBUG;
2611 else if (strcmp("verbose", arg) == 0)
2612 apic_verbosity = APIC_VERBOSE;
2613 else {
2614 pr_warning("APIC Verbosity level %s not recognised"
2615 " use apic=verbose or apic=debug\n", arg);
2616 return -EINVAL;
2617 }
2618
2619 return 0;
2620 }
2621 early_param("apic", apic_set_verbosity);
2622
2623 static int __init lapic_insert_resource(void)
2624 {
2625 if (!apic_phys)
2626 return -1;
2627
2628 /* Put local APIC into the resource map. */
2629 lapic_resource.start = apic_phys;
2630 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2631 insert_resource(&iomem_resource, &lapic_resource);
2632
2633 return 0;
2634 }
2635
2636 /*
2637 * need call insert after e820_reserve_resources()
2638 * that is using request_resource
2639 */
2640 late_initcall(lapic_insert_resource);
2641
2642 static int __init apic_set_disabled_cpu_apicid(char *arg)
2643 {
2644 if (!arg || !get_option(&arg, &disabled_cpu_apicid))
2645 return -EINVAL;
2646
2647 return 0;
2648 }
2649 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);