/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* #define apic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg) */
#define apic_debug(fmt, arg...) do {} while (0)
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define APIC_BROADCAST			0xFF
#define X2APIC_BROADCAST		0xFFFFFFFFul
static bool lapic_timer_advance_adjust_done = false;
#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
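/*
 * IRR/ISR/TMR are 256-bit bitmaps, one bit per vector, stored as eight
 * 32-bit APIC registers spaced 0x10 bytes apart.  VEC_POS() and REG_POS()
 * (from lapic.h) split a vector into its bit offset and register offset;
 * e.g. vector 0x31 lives at bit 0x11 of the second 32-bit register.
 */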
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}
static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
{
	return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
}

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}
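/*
 * Translate a logical destination ID into a cluster of candidate LAPICs
 * plus a per-cluster bitmask.  For x2APIC, e.g. dest_id 0x00020005 selects
 * the cluster starting at physical ID 32 (0x2 * 16) with mask 0x0005,
 * i.e. the LAPICs with x2APIC IDs 32 and 34 (a worked example of the
 * code below, not an exhaustive description).
 */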
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}
static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}
static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;
	u32 max_id = 255; /* enough space for any xAPIC ID */

	mutex_lock(&kvm->arch.apic_map_lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
	               sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr;
		u8 xapic_id;
		u32 x2apic_id;

		if (!kvm_apic_present(vcpu))
			continue;

		xapic_id = kvm_xapic_id(apic);
		x2apic_id = kvm_x2apic_id(apic);

		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
				x2apic_id <= new->max_apic_id)
			new->phys_map[x2apic_id] = apic;
		/*
		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
		 * prevent them from masking VCPUs with APIC ID <= 0xff.
		 */
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_key_slow_dec_deferred(&apic_sw_disabled);
		else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}
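/*
 * The x2APIC LDR encodes the cluster (id >> 4) in bits 31:16 and a
 * one-hot position (1 << (id & 0xf)) in bits 15:0; e.g. id 0x23 yields
 * LDR 0x00020008 (cluster 2, member 3).
 */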
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register.  Some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
	 * in the lapic without checking the IOAPIC version first, so
	 * level-triggered interrupts would never get EOIed in the IOAPIC.
	 */
	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,			/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,		/* LVT0-1 */
	LVT_MASK			/* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}
static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}
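/*
 * Transfer pending bits from the posted-interrupt descriptor (PIR) into
 * the vIRR, one 32-bit chunk at a time.  The xchg() clears each PIR word
 * atomically so a concurrently posted interrupt is never lost.  The
 * return value tells the caller whether the highest IRR bit is one this
 * transfer just set (i.e. whether RVI may need refreshing).
 */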
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		pir_val = READ_ONCE(pir[i]);
		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
		if (pir_val) {
			prev_irr_val = irr_val;
			irr_val |= xchg(&pir[i], 0);
			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
			if (prev_irr_val != irr_val)
				max_updated_irr =
					__fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will be always
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* need to update RVI */
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * The ISR (in-service register) bit is set when injecting
		 * an interrupt, and the highest vector is injected.  Thus
		 * the latest bit set matches the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}
static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}
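/*
 * Send-IPI hypercall: the two bitmaps are destination APIC IDs relative
 * to @min.  ipi_bitmap_low covers the first cluster_size IDs starting at
 * @min and ipi_bitmap_high the next cluster_size IDs, where cluster_size
 * is 64 on 64-bit guests and 32 otherwise.  Only physical, non-shorthand
 * destinations are supported.
 */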
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	int i;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count = 0;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	/* Logical destinations and shorthands are not supported. */
	if (icr & APIC_DEST_MASK)
		return -KVM_EINVAL;
	if (icr & APIC_SHORT_MASK)
		return -KVM_EINVAL;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (unlikely(!map)) {
		count = -EOPNOTSUPP;
		goto out;
	}

	if (min > map->max_apic_id)
		goto out;
	/* Bits above cluster_size are masked in the caller.  */
	for_each_set_bit(i, &ipi_bitmap_low,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, &irq, NULL);
		}
	}

	min += cluster_size;

	if (min > map->max_apic_id)
		goto out;

	for_each_set_bit(i, &ipi_bitmap_high,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, &irq, NULL);
		}
	}

out:
	rcu_read_unlock();
	return count;
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				      sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;
	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;
	if (apic->vcpu->arch.apicv_active)
		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}
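/*
 * PPR is the task priority if its class (bits 7:4) is at least that of
 * the highest in-service vector, else the class of that vector; e.g.
 * TPR 0x45 with ISRV 0x61 yields PPR 0x60 (illustrative values).
 */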
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	*new_ppr = ppr;
	if (ppr != old_ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}
static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_x2apic_id(apic);

	/*
	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
	 * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
	 * The 0xff condition is needed because the xAPIC ID is writeable.
	 */
	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
		return true;

	return mda == kvm_xapic_id(apic);
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
		return false;
	}
}
/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int short_hand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
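/*
 * Vector hashing picks the (vector % dest_vcpus + 1)-th set bit of the
 * destination bitmap; e.g. vector 35 with 4 destinations gives mod 3,
 * so the fourth set bit is chosen (illustrative values).
 */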
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}
static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
				map->mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
		                     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}
/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			*dst = &map->phys_map[irq->dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);
		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		/* fall through */
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
			else
				apic_clear_vector(vector, apic->regs + APIC_TMR);
		}

		if (vcpu->arch.apicv_active)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			kvm_lapic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit; one example is
	 * when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}
/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
		   "msi_redir_hint 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector, irq.msi_redir_hint);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
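/*
 * Derive the current-count register from the time left until expiry:
 * TMCCT = (remaining_ns mod period) / (APIC_BUS_CYCLE_NS * divide_count),
 * i.e. nanoseconds converted back into (divided) bus cycles.
 */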
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}
*apic
, bool write
)
1264 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1265 struct kvm_run
*run
= vcpu
->run
;
1267 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS
, vcpu
);
1268 run
->tpr_access
.rip
= kvm_rip_read(vcpu
);
1269 run
->tpr_access
.is_write
= write
;
1272 static inline void report_tpr_access(struct kvm_lapic
*apic
, bool write
)
1274 if (apic
->vcpu
->arch
.tpr_access_reporting
)
1275 __report_tpr_access(apic
, write
);
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}
*apic
, u32 offset
, int len
,
1319 unsigned char alignment
= offset
& 0xf;
1321 /* this bitmask has a bit cleared for each reserved register */
1322 static const u64 rmask
= 0x43ff01ffffffe70cULL
;
1324 if ((alignment
+ len
) > 4) {
1325 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
1330 if (offset
> 0x3f0 || !(rmask
& (1ULL << (offset
>> 4)))) {
1331 apic_debug("KVM_APIC_READ: read reserved register %x\n",
1336 result
= __apic_read(apic
, offset
& ~0xf);
1338 trace_kvm_apic_read(offset
, result
);
1344 memcpy(data
, (char *)&result
+ alignment
, len
);
1347 printk(KERN_ERR
"Local APIC read with len = %x, "
1348 "should be 1,2, or 4 instead\n", len
);
1353 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read
);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}
*apic
)
1386 u32 tmp1
, tmp2
, tdcr
;
1388 tdcr
= kvm_lapic_get_reg(apic
, APIC_TDCR
);
1390 tmp2
= ((tmp1
& 0x3) | ((tmp1
& 0x8) >> 1)) + 1;
1391 apic
->divide_count
= 0x1 << (tmp2
& 0x7);
1393 apic_debug("timer divide count is 0x%x\n",
1394 apic
->divide_count
);
static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
			    "kvm: vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}
static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}
static void apic_timer_expired(struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct swait_queue_head *q = &vcpu->wq;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	atomic_inc(&apic->lapic_timer.pending);
	kvm_set_pending_timer(vcpu);

	/*
	 * For x86, the atomic_inc() is serialized, thus
	 * using swait_active() is safe.
	 */
	if (swait_active(q))
		swake_up_one(q);

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
}
/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (vcpu->arch.apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}
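/*
 * The hrtimer fires lapic_timer_advance_ns early and the remaining gap
 * is closed here by busy-waiting on the guest TSC.  Until
 * lapic_timer_advance_adjust_done becomes true, each expiry nudges the
 * advance value toward the observed error by at most 1/8 of its current
 * value (LAPIC_TIMER_ADVANCE_ADJUST_STEP), stopping once the error falls
 * below LAPIC_TIMER_ADVANCE_ADJUST_DONE (100) TSC cycles.
 */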
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline, ns;

	if (!lapic_in_kernel(vcpu))
		return;

	if (apic->lapic_timer.expired_tscdeadline == 0)
		return;

	if (!lapic_timer_int_injected(vcpu))
		return;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
	if (guest_tsc < tsc_deadline)
		__delay(min(tsc_deadline - guest_tsc,
			nsec_to_cycles(vcpu, lapic_timer_advance_ns)));

	if (!lapic_timer_advance_adjust_done) {
		/* too early */
		if (guest_tsc < tsc_deadline) {
			ns = (tsc_deadline - guest_tsc) * 1000000ULL;
			do_div(ns, vcpu->arch.virtual_tsc_khz);
			lapic_timer_advance_ns -= min((unsigned int)ns,
				lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
		} else {
			/* too late */
			ns = (guest_tsc - tsc_deadline) * 1000000ULL;
			do_div(ns, vcpu->arch.virtual_tsc_khz);
			lapic_timer_advance_ns += min((unsigned int)ns,
				lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
		}
		if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
			lapic_timer_advance_adjust_done = true;
	}
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = ktime_get();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	if (likely(tscdeadline > guest_tsc)) {
		ns = (tscdeadline - guest_tsc) * 1000000ULL;
		do_div(ns, this_tsc_khz);
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
		hrtimer_start(&apic->lapic_timer.timer,
				expire, HRTIMER_MODE_ABS_PINNED);
	} else
		apic_timer_expired(apic);

	local_irq_restore(flags);
}
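/*
 * When the guest changes TDCR while the timer is armed, only the time
 * still remaining is rescaled: new_remaining_ns = old_remaining_ns *
 * new_divisor / old_divisor, and the TSC deadline shifts by the
 * difference between the two remainders.
 */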
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
	ktime_t now, remaining;
	u64 ns_remaining_old, ns_remaining_new;

	apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
		* APIC_BUS_CYCLE_NS * apic->divide_count;
	limit_periodic_timer_frequency(apic);

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
	                                   apic->divide_count, old_divisor);

	apic->lapic_timer.tscdeadline +=
		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
		nsec_to_cycles(apic->vcpu, ns_remaining_old);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}
static bool set_target_expiration(struct kvm_lapic *apic)
{
	ktime_t now;
	u64 tscl = rdtsc();

	now = ktime_get();
	apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
		* APIC_BUS_CYCLE_NS * apic->divide_count;

	if (!apic->lapic_timer.period) {
		apic->lapic_timer.tscdeadline = 0;
		return false;
	}

	limit_periodic_timer_frequency(apic);

	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
		   PRIx64 ", "
		   "timer initial count 0x%x, period %lldns, "
		   "expire @ 0x%016" PRIx64 ".\n", __func__,
		   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
		   kvm_lapic_get_reg(apic, APIC_TMICT),
		   apic->lapic_timer.period,
		   ktime_to_ns(ktime_add_ns(now,
				apic->lapic_timer.period)));

	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);

	return true;
}
static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
	ktime_t now = ktime_get();
	u64 tscl = rdtsc();
	ktime_t delta;

	/*
	 * Synchronize both deadlines to the same time source or
	 * differences in the periods (caused by differences in the
	 * underlying clocks or numerical approximation errors) will
	 * cause the two to drift apart over time as the errors
	 * accumulate.
	 */
	apic->lapic_timer.target_expiration =
		ktime_add_ns(apic->lapic_timer.target_expiration,
				apic->lapic_timer.period);
	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, delta);
}
static void start_sw_period(struct kvm_lapic *apic)
{
	if (!apic->lapic_timer.period)
		return;

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic);

		if (apic_lvtt_oneshot(apic))
			return;

		advance_periodic_target_expiration(apic);
	}

	hrtimer_start(&apic->lapic_timer.timer,
		apic->lapic_timer.target_expiration,
		HRTIMER_MODE_ABS_PINNED);
}
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
	WARN_ON(preemptible());
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}
static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	int r;

	WARN_ON(preemptible());
	if (!kvm_x86_ops->set_hv_timer)
		return false;

	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return false;

	if (!ktimer->tscdeadline)
		return false;

	r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
	if (r < 0)
		return false;

	ktimer->hv_timer_in_use = true;
	hrtimer_cancel(&ktimer->timer);

	/*
	 * Also recheck ktimer->pending, in case the sw timer triggered in
	 * the window.  For periodic timer, leave the hv timer running for
	 * simplicity, and the deadline will be recomputed on the next vmexit.
	 */
	if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
		if (r)
			apic_timer_expired(apic);
		return false;
	}

	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
	return true;
}
static void start_sw_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	WARN_ON(preemptible());
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
		start_sw_period(apic);
	else if (apic_lvtt_tscdeadline(apic))
		start_sw_tscdeadline(apic);
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}
static void restart_apic_timer(struct kvm_lapic *apic)
{
	preempt_disable();
	if (!start_hv_timer(apic))
		start_sw_timer(apic);
	preempt_enable();
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired */
	if (!apic->lapic_timer.hv_timer_in_use)
		goto out;
	WARN_ON(swait_active(&vcpu->wq));
	cancel_hv_timer(apic);
	apic_timer_expired(apic);

	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		advance_periodic_target_expiration(apic);
		restart_apic_timer(apic);
	}
out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	restart_apic_timer(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* Possibly the TSC deadline timer is not enabled yet */
	if (apic->lapic_timer.hv_timer_in_use)
		start_sw_timer(apic);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	restart_apic_timer(apic);
}
static void start_apic_timer(struct kvm_lapic *apic)
{
	atomic_set(&apic->lapic_timer.pending, 0);

	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
	    && !set_target_expiration(apic))
		return;

	restart_apic_timer(apic);
}
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}
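/*
 * Handles guest writes to the writable APIC registers in both xAPIC and
 * x2APIC mode.  Returns 0 on success and 1 for a write that is reserved
 * or read-only in the current mode (e.g. APIC_ID, APIC_LDR and APIC_DFR
 * in x2APIC mode).  An x2APIC SELF_IPI write is folded into an ICR write
 * with the self shorthand (0x40000) and fixed delivery.
 */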
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_xapic_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
				lvt_val = kvm_lapic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		kvm_lapic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		/* fall through */
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
		kvm_lapic_set_reg(apic, reg, val);

		break;

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		if (val & 4)
			apic_debug("KVM_WRITE:TDCR %x\n", val);
		kvm_lapic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
				apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers are aligned on a 128-bit boundary and must be
	 * accessed with 32-bit reads and writes.
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32*)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) ||
		!apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}
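/*
 * CR8 holds only the task-priority class: kvm_lapic_set_tpr() maps
 * CR8[3:0] into TPR[7:4] (keeping just bit 2 of the old low nibble),
 * and kvm_lapic_get_cr8() returns TPR[7:4].
 */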
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic)
		value |= MSR_IA32_APICBASE_BSP;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_key_slow_dec_deferred(&apic_hw_disabled);
		} else {
			static_key_slow_inc(&apic_hw_disabled.key);
			recalculate_apic_map(vcpu->kvm);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
		kvm_x86_ops->set_virtual_apic_mode(vcpu);

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!apic)
		return;

	apic_debug("%s\n", __func__);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event) {
		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
		                         MSR_IA32_APICBASE_ENABLE);
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_lapic_get_reg(apic, APIC_ID),
		   vcpu->arch.apic_base, apic->base_address);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!apic_enabled(apic))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}
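/*
 * Inject a pending (host-side expired) timer interrupt through LVTT
 * and reset the one-shot/TSC-deadline bookkeeping once delivered.
 */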
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		if (apic_lvtt_oneshot(apic)) {
			apic->lapic_timer.tscdeadline = 0;
			apic->lapic_timer.target_expiration = 0;
		}
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}
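/*
 * Acknowledge the highest-priority pending interrupt: the vector moves
 * from IRR to ISR (except for Hyper-V SynIC auto-EOI vectors) and the
 * PPR is recomputed.
 */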
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}
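/*
 * Convert the APIC_ID register between the xAPIC layout userspace uses
 * (ID in bits 31:24, so e.g. vcpu_id 3 is stored as 0x03000000) and
 * the full 32-bit x2APIC ID, depending on whether userspace selected
 * the 32-bit-ID x2apic_format via KVM_CAP_X2APIC_API.  On set, the LDR
 * is recomputed from the ID as well.
 */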
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
	return kvm_apic_state_fixup(vcpu, s, false);
}
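/*
 * Restore APIC state from userspace (KVM_SET_LAPIC).  Ordering matters:
 * the base MSR and SPIV are applied first so the sw-enabled static key
 * stays balanced, then the register page is copied and all derived
 * state (APIC map, timer, NMI watchdog, ISR/IRR caches) is recomputed.
 */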
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r)
		return r;
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}
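/*
 * After a vCPU migrates, re-arm the pinned lapic hrtimer so that it
 * fires on the pCPU the vCPU now runs on.
 */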
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}
/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}
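/*
 * Publish TPR and the highest ISR/IRR vectors to the guest's vapic
 * page: byte 0 carries the TPR, byte 1 the high nibble of the
 * in-service vector and byte 3 the highest pending vector.
 */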
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
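/*
 * x2APIC MSRs map linearly onto the xAPIC register page:
 * reg = (msr - 0x800) << 4, so e.g. the TPR MSR 0x808 addresses
 * APIC_TASKPRI (offset 0x80).  ICR is the single 64-bit register;
 * its high half is stored via ICR2 before the low half is written.
 */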
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
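/*
 * The Hyper-V vAPIC MSRs take a pre-decoded APIC register offset
 * (e.g. APIC_ICR, APIC_TASKPRI) rather than an x2APIC MSR index.
 */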
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
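/*
 * MSR_KVM_PV_EOI_EN: bit 0 (KVM_MSR_ENABLED) switches PV EOI on, the
 * remaining bits hold the 4-byte aligned guest address of the PV EOI
 * word.  The gfn_to_hva cache is only reinitialized when the address
 * or length actually changes.
 */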
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
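/*
 * Process latched INIT and SIPI events.  INIT resets the vCPU and puts
 * an AP into INIT_RECEIVED; a subsequent SIPI makes it RUNNABLE at the
 * delivered start-up vector.  While in SMM, INIT stays pending and
 * SIPIs are dropped.
 */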
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}
void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}