1 // SPDX-License-Identifier: GPL-2.0-only
4 * Local APIC virtualization
6 * Copyright (C) 2006 Qumranet, Inc.
7 * Copyright (C) 2007 Novell
8 * Copyright (C) 2007 Intel
9 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
12 * Dor Laor <dor.laor@qumranet.com>
13 * Gregory Haskins <ghaskins@novell.com>
14 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
19 #include <linux/kvm_host.h>
20 #include <linux/kvm.h>
22 #include <linux/highmem.h>
23 #include <linux/smp.h>
24 #include <linux/hrtimer.h>
26 #include <linux/export.h>
27 #include <linux/math64.h>
28 #include <linux/slab.h>
29 #include <asm/processor.h>
32 #include <asm/current.h>
33 #include <asm/apicdef.h>
34 #include <asm/delay.h>
35 #include <linux/atomic.h>
36 #include <linux/jump_label.h>
37 #include "kvm_cache_regs.h"
46 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
48 #define mod_64(x, y) ((x) % (y))
56 /* 14 is the version for Xeon and Pentium 8.4.8*/
57 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
58 #define LAPIC_MMIO_LENGTH (1 << 12)
59 /* followed define is not in apicdef.h */
60 #define MAX_APIC_VECTOR 256
61 #define APIC_VECTORS_PER_REG 32
63 static bool lapic_timer_advance_dynamic __read_mostly
;
64 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
65 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
66 #define LAPIC_TIMER_ADVANCE_NS_INIT 1000
67 #define LAPIC_TIMER_ADVANCE_NS_MAX 5000
68 /* step-by-step approximation to mitigate fluctuation */
69 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
71 static inline int apic_test_vector(int vec
, void *bitmap
)
73 return test_bit(VEC_POS(vec
), (bitmap
) + REG_POS(vec
));
76 bool kvm_apic_pending_eoi(struct kvm_vcpu
*vcpu
, int vector
)
78 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
80 return apic_test_vector(vector
, apic
->regs
+ APIC_ISR
) ||
81 apic_test_vector(vector
, apic
->regs
+ APIC_IRR
);
84 static inline int __apic_test_and_set_vector(int vec
, void *bitmap
)
86 return __test_and_set_bit(VEC_POS(vec
), (bitmap
) + REG_POS(vec
));
89 static inline int __apic_test_and_clear_vector(int vec
, void *bitmap
)
91 return __test_and_clear_bit(VEC_POS(vec
), (bitmap
) + REG_POS(vec
));
94 __read_mostly
DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled
, HZ
);
95 __read_mostly
DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled
, HZ
);
97 static inline int apic_enabled(struct kvm_lapic
*apic
)
99 return kvm_apic_sw_enabled(apic
) && kvm_apic_hw_enabled(apic
);
103 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
106 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
107 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
109 static inline u32
kvm_x2apic_id(struct kvm_lapic
*apic
)
111 return apic
->vcpu
->vcpu_id
;
114 static bool kvm_can_post_timer_interrupt(struct kvm_vcpu
*vcpu
)
116 return pi_inject_timer
&& kvm_vcpu_apicv_active(vcpu
);
119 bool kvm_can_use_hv_timer(struct kvm_vcpu
*vcpu
)
121 return kvm_x86_ops
.set_hv_timer
122 && !(kvm_mwait_in_guest(vcpu
->kvm
) ||
123 kvm_can_post_timer_interrupt(vcpu
));
125 EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer
);
127 static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu
*vcpu
)
129 return kvm_can_post_timer_interrupt(vcpu
) && vcpu
->mode
== IN_GUEST_MODE
;
132 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map
*map
,
133 u32 dest_id
, struct kvm_lapic
***cluster
, u16
*mask
) {
135 case KVM_APIC_MODE_X2APIC
: {
136 u32 offset
= (dest_id
>> 16) * 16;
137 u32 max_apic_id
= map
->max_apic_id
;
139 if (offset
<= max_apic_id
) {
140 u8 cluster_size
= min(max_apic_id
- offset
+ 1, 16U);
142 offset
= array_index_nospec(offset
, map
->max_apic_id
+ 1);
143 *cluster
= &map
->phys_map
[offset
];
144 *mask
= dest_id
& (0xffff >> (16 - cluster_size
));
151 case KVM_APIC_MODE_XAPIC_FLAT
:
152 *cluster
= map
->xapic_flat_map
;
153 *mask
= dest_id
& 0xff;
155 case KVM_APIC_MODE_XAPIC_CLUSTER
:
156 *cluster
= map
->xapic_cluster_map
[(dest_id
>> 4) & 0xf];
157 *mask
= dest_id
& 0xf;
165 static void kvm_apic_map_free(struct rcu_head
*rcu
)
167 struct kvm_apic_map
*map
= container_of(rcu
, struct kvm_apic_map
, rcu
);
173 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
175 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
176 * apic_map_lock_held.
184 void kvm_recalculate_apic_map(struct kvm
*kvm
)
186 struct kvm_apic_map
*new, *old
= NULL
;
187 struct kvm_vcpu
*vcpu
;
189 u32 max_id
= 255; /* enough space for any xAPIC ID */
191 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
192 if (atomic_read_acquire(&kvm
->arch
.apic_map_dirty
) == CLEAN
)
195 mutex_lock(&kvm
->arch
.apic_map_lock
);
197 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
198 * (if clean) or the APIC registers (if dirty).
200 if (atomic_cmpxchg_acquire(&kvm
->arch
.apic_map_dirty
,
201 DIRTY
, UPDATE_IN_PROGRESS
) == CLEAN
) {
202 /* Someone else has updated the map. */
203 mutex_unlock(&kvm
->arch
.apic_map_lock
);
207 kvm_for_each_vcpu(i
, vcpu
, kvm
)
208 if (kvm_apic_present(vcpu
))
209 max_id
= max(max_id
, kvm_x2apic_id(vcpu
->arch
.apic
));
211 new = kvzalloc(sizeof(struct kvm_apic_map
) +
212 sizeof(struct kvm_lapic
*) * ((u64
)max_id
+ 1),
218 new->max_apic_id
= max_id
;
220 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
221 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
222 struct kvm_lapic
**cluster
;
228 if (!kvm_apic_present(vcpu
))
231 xapic_id
= kvm_xapic_id(apic
);
232 x2apic_id
= kvm_x2apic_id(apic
);
234 /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
235 if ((apic_x2apic_mode(apic
) || x2apic_id
> 0xff) &&
236 x2apic_id
<= new->max_apic_id
)
237 new->phys_map
[x2apic_id
] = apic
;
239 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
240 * prevent them from masking VCPUs with APIC ID <= 0xff.
242 if (!apic_x2apic_mode(apic
) && !new->phys_map
[xapic_id
])
243 new->phys_map
[xapic_id
] = apic
;
245 if (!kvm_apic_sw_enabled(apic
))
248 ldr
= kvm_lapic_get_reg(apic
, APIC_LDR
);
250 if (apic_x2apic_mode(apic
)) {
251 new->mode
|= KVM_APIC_MODE_X2APIC
;
253 ldr
= GET_APIC_LOGICAL_ID(ldr
);
254 if (kvm_lapic_get_reg(apic
, APIC_DFR
) == APIC_DFR_FLAT
)
255 new->mode
|= KVM_APIC_MODE_XAPIC_FLAT
;
257 new->mode
|= KVM_APIC_MODE_XAPIC_CLUSTER
;
260 if (!kvm_apic_map_get_logical_dest(new, ldr
, &cluster
, &mask
))
264 cluster
[ffs(mask
) - 1] = apic
;
267 old
= rcu_dereference_protected(kvm
->arch
.apic_map
,
268 lockdep_is_held(&kvm
->arch
.apic_map_lock
));
269 rcu_assign_pointer(kvm
->arch
.apic_map
, new);
271 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
272 * If another update has come in, leave it DIRTY.
274 atomic_cmpxchg_release(&kvm
->arch
.apic_map_dirty
,
275 UPDATE_IN_PROGRESS
, CLEAN
);
276 mutex_unlock(&kvm
->arch
.apic_map_lock
);
279 call_rcu(&old
->rcu
, kvm_apic_map_free
);
281 kvm_make_scan_ioapic_request(kvm
);
284 static inline void apic_set_spiv(struct kvm_lapic
*apic
, u32 val
)
286 bool enabled
= val
& APIC_SPIV_APIC_ENABLED
;
288 kvm_lapic_set_reg(apic
, APIC_SPIV
, val
);
290 if (enabled
!= apic
->sw_enabled
) {
291 apic
->sw_enabled
= enabled
;
293 static_branch_slow_dec_deferred(&apic_sw_disabled
);
295 static_branch_inc(&apic_sw_disabled
.key
);
297 atomic_set_release(&apic
->vcpu
->kvm
->arch
.apic_map_dirty
, DIRTY
);
300 /* Check if there are APF page ready requests pending */
302 kvm_make_request(KVM_REQ_APF_READY
, apic
->vcpu
);
305 static inline void kvm_apic_set_xapic_id(struct kvm_lapic
*apic
, u8 id
)
307 kvm_lapic_set_reg(apic
, APIC_ID
, id
<< 24);
308 atomic_set_release(&apic
->vcpu
->kvm
->arch
.apic_map_dirty
, DIRTY
);
311 static inline void kvm_apic_set_ldr(struct kvm_lapic
*apic
, u32 id
)
313 kvm_lapic_set_reg(apic
, APIC_LDR
, id
);
314 atomic_set_release(&apic
->vcpu
->kvm
->arch
.apic_map_dirty
, DIRTY
);
317 static inline void kvm_apic_set_dfr(struct kvm_lapic
*apic
, u32 val
)
319 kvm_lapic_set_reg(apic
, APIC_DFR
, val
);
320 atomic_set_release(&apic
->vcpu
->kvm
->arch
.apic_map_dirty
, DIRTY
);
323 static inline u32
kvm_apic_calc_x2apic_ldr(u32 id
)
325 return ((id
>> 4) << 16) | (1 << (id
& 0xf));
328 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic
*apic
, u32 id
)
330 u32 ldr
= kvm_apic_calc_x2apic_ldr(id
);
332 WARN_ON_ONCE(id
!= apic
->vcpu
->vcpu_id
);
334 kvm_lapic_set_reg(apic
, APIC_ID
, id
);
335 kvm_lapic_set_reg(apic
, APIC_LDR
, ldr
);
336 atomic_set_release(&apic
->vcpu
->kvm
->arch
.apic_map_dirty
, DIRTY
);
339 static inline int apic_lvt_enabled(struct kvm_lapic
*apic
, int lvt_type
)
341 return !(kvm_lapic_get_reg(apic
, lvt_type
) & APIC_LVT_MASKED
);
344 static inline int apic_lvtt_oneshot(struct kvm_lapic
*apic
)
346 return apic
->lapic_timer
.timer_mode
== APIC_LVT_TIMER_ONESHOT
;
349 static inline int apic_lvtt_period(struct kvm_lapic
*apic
)
351 return apic
->lapic_timer
.timer_mode
== APIC_LVT_TIMER_PERIODIC
;
354 static inline int apic_lvtt_tscdeadline(struct kvm_lapic
*apic
)
356 return apic
->lapic_timer
.timer_mode
== APIC_LVT_TIMER_TSCDEADLINE
;
359 static inline int apic_lvt_nmi_mode(u32 lvt_val
)
361 return (lvt_val
& (APIC_MODE_MASK
| APIC_LVT_MASKED
)) == APIC_DM_NMI
;
364 void kvm_apic_set_version(struct kvm_vcpu
*vcpu
)
366 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
367 u32 v
= APIC_VERSION
;
369 if (!lapic_in_kernel(vcpu
))
373 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
374 * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
375 * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
376 * version first and level-triggered interrupts never get EOIed in
379 if (guest_cpuid_has(vcpu
, X86_FEATURE_X2APIC
) &&
380 !ioapic_in_kernel(vcpu
->kvm
))
381 v
|= APIC_LVR_DIRECTED_EOI
;
382 kvm_lapic_set_reg(apic
, APIC_LVR
, v
);
385 static const unsigned int apic_lvt_mask
[KVM_APIC_LVT_NUM
] = {
386 LVT_MASK
, /* part LVTT mask, timer mode mask added at runtime */
387 LVT_MASK
| APIC_MODE_MASK
, /* LVTTHMR */
388 LVT_MASK
| APIC_MODE_MASK
, /* LVTPC */
389 LINT_MASK
, LINT_MASK
, /* LVT0-1 */
390 LVT_MASK
/* LVTERR */
393 static int find_highest_vector(void *bitmap
)
398 for (vec
= MAX_APIC_VECTOR
- APIC_VECTORS_PER_REG
;
399 vec
>= 0; vec
-= APIC_VECTORS_PER_REG
) {
400 reg
= bitmap
+ REG_POS(vec
);
402 return __fls(*reg
) + vec
;
408 static u8
count_vectors(void *bitmap
)
414 for (vec
= 0; vec
< MAX_APIC_VECTOR
; vec
+= APIC_VECTORS_PER_REG
) {
415 reg
= bitmap
+ REG_POS(vec
);
416 count
+= hweight32(*reg
);
422 bool __kvm_apic_update_irr(u32
*pir
, void *regs
, int *max_irr
)
425 u32 pir_val
, irr_val
, prev_irr_val
;
428 max_updated_irr
= -1;
431 for (i
= vec
= 0; i
<= 7; i
++, vec
+= 32) {
432 pir_val
= READ_ONCE(pir
[i
]);
433 irr_val
= *((u32
*)(regs
+ APIC_IRR
+ i
* 0x10));
435 prev_irr_val
= irr_val
;
436 irr_val
|= xchg(&pir
[i
], 0);
437 *((u32
*)(regs
+ APIC_IRR
+ i
* 0x10)) = irr_val
;
438 if (prev_irr_val
!= irr_val
) {
440 __fls(irr_val
^ prev_irr_val
) + vec
;
444 *max_irr
= __fls(irr_val
) + vec
;
447 return ((max_updated_irr
!= -1) &&
448 (max_updated_irr
== *max_irr
));
450 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr
);
452 bool kvm_apic_update_irr(struct kvm_vcpu
*vcpu
, u32
*pir
, int *max_irr
)
454 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
456 return __kvm_apic_update_irr(pir
, apic
->regs
, max_irr
);
458 EXPORT_SYMBOL_GPL(kvm_apic_update_irr
);
460 static inline int apic_search_irr(struct kvm_lapic
*apic
)
462 return find_highest_vector(apic
->regs
+ APIC_IRR
);
465 static inline int apic_find_highest_irr(struct kvm_lapic
*apic
)
470 * Note that irr_pending is just a hint. It will be always
471 * true with virtual interrupt delivery enabled.
473 if (!apic
->irr_pending
)
476 result
= apic_search_irr(apic
);
477 ASSERT(result
== -1 || result
>= 16);
482 static inline void apic_clear_irr(int vec
, struct kvm_lapic
*apic
)
484 struct kvm_vcpu
*vcpu
;
488 if (unlikely(vcpu
->arch
.apicv_active
)) {
489 /* need to update RVI */
490 kvm_lapic_clear_vector(vec
, apic
->regs
+ APIC_IRR
);
491 static_call(kvm_x86_hwapic_irr_update
)(vcpu
,
492 apic_find_highest_irr(apic
));
494 apic
->irr_pending
= false;
495 kvm_lapic_clear_vector(vec
, apic
->regs
+ APIC_IRR
);
496 if (apic_search_irr(apic
) != -1)
497 apic
->irr_pending
= true;
501 void kvm_apic_clear_irr(struct kvm_vcpu
*vcpu
, int vec
)
503 apic_clear_irr(vec
, vcpu
->arch
.apic
);
505 EXPORT_SYMBOL_GPL(kvm_apic_clear_irr
);
507 static inline void apic_set_isr(int vec
, struct kvm_lapic
*apic
)
509 struct kvm_vcpu
*vcpu
;
511 if (__apic_test_and_set_vector(vec
, apic
->regs
+ APIC_ISR
))
517 * With APIC virtualization enabled, all caching is disabled
518 * because the processor can modify ISR under the hood. Instead
521 if (unlikely(vcpu
->arch
.apicv_active
))
522 static_call(kvm_x86_hwapic_isr_update
)(vcpu
, vec
);
525 BUG_ON(apic
->isr_count
> MAX_APIC_VECTOR
);
527 * ISR (in service register) bit is set when injecting an interrupt.
528 * The highest vector is injected. Thus the latest bit set matches
529 * the highest bit in ISR.
531 apic
->highest_isr_cache
= vec
;
535 static inline int apic_find_highest_isr(struct kvm_lapic
*apic
)
540 * Note that isr_count is always 1, and highest_isr_cache
541 * is always -1, with APIC virtualization enabled.
543 if (!apic
->isr_count
)
545 if (likely(apic
->highest_isr_cache
!= -1))
546 return apic
->highest_isr_cache
;
548 result
= find_highest_vector(apic
->regs
+ APIC_ISR
);
549 ASSERT(result
== -1 || result
>= 16);
554 static inline void apic_clear_isr(int vec
, struct kvm_lapic
*apic
)
556 struct kvm_vcpu
*vcpu
;
557 if (!__apic_test_and_clear_vector(vec
, apic
->regs
+ APIC_ISR
))
563 * We do get here for APIC virtualization enabled if the guest
564 * uses the Hyper-V APIC enlightenment. In this case we may need
565 * to trigger a new interrupt delivery by writing the SVI field;
566 * on the other hand isr_count and highest_isr_cache are unused
567 * and must be left alone.
569 if (unlikely(vcpu
->arch
.apicv_active
))
570 static_call(kvm_x86_hwapic_isr_update
)(vcpu
,
571 apic_find_highest_isr(apic
));
574 BUG_ON(apic
->isr_count
< 0);
575 apic
->highest_isr_cache
= -1;
579 int kvm_lapic_find_highest_irr(struct kvm_vcpu
*vcpu
)
581 /* This may race with setting of irr in __apic_accept_irq() and
582 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
583 * will cause vmexit immediately and the value will be recalculated
584 * on the next vmentry.
586 return apic_find_highest_irr(vcpu
->arch
.apic
);
588 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr
);
590 static int __apic_accept_irq(struct kvm_lapic
*apic
, int delivery_mode
,
591 int vector
, int level
, int trig_mode
,
592 struct dest_map
*dest_map
);
594 int kvm_apic_set_irq(struct kvm_vcpu
*vcpu
, struct kvm_lapic_irq
*irq
,
595 struct dest_map
*dest_map
)
597 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
599 return __apic_accept_irq(apic
, irq
->delivery_mode
, irq
->vector
,
600 irq
->level
, irq
->trig_mode
, dest_map
);
603 static int __pv_send_ipi(unsigned long *ipi_bitmap
, struct kvm_apic_map
*map
,
604 struct kvm_lapic_irq
*irq
, u32 min
)
607 struct kvm_vcpu
*vcpu
;
609 if (min
> map
->max_apic_id
)
612 for_each_set_bit(i
, ipi_bitmap
,
613 min((u32
)BITS_PER_LONG
, (map
->max_apic_id
- min
+ 1))) {
614 if (map
->phys_map
[min
+ i
]) {
615 vcpu
= map
->phys_map
[min
+ i
]->vcpu
;
616 count
+= kvm_apic_set_irq(vcpu
, irq
, NULL
);
623 int kvm_pv_send_ipi(struct kvm
*kvm
, unsigned long ipi_bitmap_low
,
624 unsigned long ipi_bitmap_high
, u32 min
,
625 unsigned long icr
, int op_64_bit
)
627 struct kvm_apic_map
*map
;
628 struct kvm_lapic_irq irq
= {0};
629 int cluster_size
= op_64_bit
? 64 : 32;
632 if (icr
& (APIC_DEST_MASK
| APIC_SHORT_MASK
))
635 irq
.vector
= icr
& APIC_VECTOR_MASK
;
636 irq
.delivery_mode
= icr
& APIC_MODE_MASK
;
637 irq
.level
= (icr
& APIC_INT_ASSERT
) != 0;
638 irq
.trig_mode
= icr
& APIC_INT_LEVELTRIG
;
641 map
= rcu_dereference(kvm
->arch
.apic_map
);
645 count
= __pv_send_ipi(&ipi_bitmap_low
, map
, &irq
, min
);
647 count
+= __pv_send_ipi(&ipi_bitmap_high
, map
, &irq
, min
);
654 static int pv_eoi_put_user(struct kvm_vcpu
*vcpu
, u8 val
)
657 return kvm_write_guest_cached(vcpu
->kvm
, &vcpu
->arch
.pv_eoi
.data
, &val
,
661 static int pv_eoi_get_user(struct kvm_vcpu
*vcpu
, u8
*val
)
664 return kvm_read_guest_cached(vcpu
->kvm
, &vcpu
->arch
.pv_eoi
.data
, val
,
668 static inline bool pv_eoi_enabled(struct kvm_vcpu
*vcpu
)
670 return vcpu
->arch
.pv_eoi
.msr_val
& KVM_MSR_ENABLED
;
673 static bool pv_eoi_get_pending(struct kvm_vcpu
*vcpu
)
676 if (pv_eoi_get_user(vcpu
, &val
) < 0) {
677 printk(KERN_WARNING
"Can't read EOI MSR value: 0x%llx\n",
678 (unsigned long long)vcpu
->arch
.pv_eoi
.msr_val
);
681 return val
& KVM_PV_EOI_ENABLED
;
684 static void pv_eoi_set_pending(struct kvm_vcpu
*vcpu
)
686 if (pv_eoi_put_user(vcpu
, KVM_PV_EOI_ENABLED
) < 0) {
687 printk(KERN_WARNING
"Can't set EOI MSR value: 0x%llx\n",
688 (unsigned long long)vcpu
->arch
.pv_eoi
.msr_val
);
691 __set_bit(KVM_APIC_PV_EOI_PENDING
, &vcpu
->arch
.apic_attention
);
694 static void pv_eoi_clr_pending(struct kvm_vcpu
*vcpu
)
696 if (pv_eoi_put_user(vcpu
, KVM_PV_EOI_DISABLED
) < 0) {
697 printk(KERN_WARNING
"Can't clear EOI MSR value: 0x%llx\n",
698 (unsigned long long)vcpu
->arch
.pv_eoi
.msr_val
);
701 __clear_bit(KVM_APIC_PV_EOI_PENDING
, &vcpu
->arch
.apic_attention
);
704 static int apic_has_interrupt_for_ppr(struct kvm_lapic
*apic
, u32 ppr
)
707 if (apic
->vcpu
->arch
.apicv_active
)
708 highest_irr
= static_call(kvm_x86_sync_pir_to_irr
)(apic
->vcpu
);
710 highest_irr
= apic_find_highest_irr(apic
);
711 if (highest_irr
== -1 || (highest_irr
& 0xF0) <= ppr
)
716 static bool __apic_update_ppr(struct kvm_lapic
*apic
, u32
*new_ppr
)
718 u32 tpr
, isrv
, ppr
, old_ppr
;
721 old_ppr
= kvm_lapic_get_reg(apic
, APIC_PROCPRI
);
722 tpr
= kvm_lapic_get_reg(apic
, APIC_TASKPRI
);
723 isr
= apic_find_highest_isr(apic
);
724 isrv
= (isr
!= -1) ? isr
: 0;
726 if ((tpr
& 0xf0) >= (isrv
& 0xf0))
733 kvm_lapic_set_reg(apic
, APIC_PROCPRI
, ppr
);
735 return ppr
< old_ppr
;
738 static void apic_update_ppr(struct kvm_lapic
*apic
)
742 if (__apic_update_ppr(apic
, &ppr
) &&
743 apic_has_interrupt_for_ppr(apic
, ppr
) != -1)
744 kvm_make_request(KVM_REQ_EVENT
, apic
->vcpu
);
747 void kvm_apic_update_ppr(struct kvm_vcpu
*vcpu
)
749 apic_update_ppr(vcpu
->arch
.apic
);
751 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr
);
753 static void apic_set_tpr(struct kvm_lapic
*apic
, u32 tpr
)
755 kvm_lapic_set_reg(apic
, APIC_TASKPRI
, tpr
);
756 apic_update_ppr(apic
);
759 static bool kvm_apic_broadcast(struct kvm_lapic
*apic
, u32 mda
)
761 return mda
== (apic_x2apic_mode(apic
) ?
762 X2APIC_BROADCAST
: APIC_BROADCAST
);
765 static bool kvm_apic_match_physical_addr(struct kvm_lapic
*apic
, u32 mda
)
767 if (kvm_apic_broadcast(apic
, mda
))
770 if (apic_x2apic_mode(apic
))
771 return mda
== kvm_x2apic_id(apic
);
774 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
775 * it were in x2APIC mode. Hotplugged VCPUs start in xAPIC mode and
776 * this allows unique addressing of VCPUs with APIC ID over 0xff.
777 * The 0xff condition is needed because writeable xAPIC ID.
779 if (kvm_x2apic_id(apic
) > 0xff && mda
== kvm_x2apic_id(apic
))
782 return mda
== kvm_xapic_id(apic
);
785 static bool kvm_apic_match_logical_addr(struct kvm_lapic
*apic
, u32 mda
)
789 if (kvm_apic_broadcast(apic
, mda
))
792 logical_id
= kvm_lapic_get_reg(apic
, APIC_LDR
);
794 if (apic_x2apic_mode(apic
))
795 return ((logical_id
>> 16) == (mda
>> 16))
796 && (logical_id
& mda
& 0xffff) != 0;
798 logical_id
= GET_APIC_LOGICAL_ID(logical_id
);
800 switch (kvm_lapic_get_reg(apic
, APIC_DFR
)) {
802 return (logical_id
& mda
) != 0;
803 case APIC_DFR_CLUSTER
:
804 return ((logical_id
>> 4) == (mda
>> 4))
805 && (logical_id
& mda
& 0xf) != 0;
811 /* The KVM local APIC implementation has two quirks:
813 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
814 * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
815 * KVM doesn't do that aliasing.
817 * - in-kernel IOAPIC messages have to be delivered directly to
818 * x2APIC, because the kernel does not support interrupt remapping.
819 * In order to support broadcast without interrupt remapping, x2APIC
820 * rewrites the destination of non-IPI messages from APIC_BROADCAST
821 * to X2APIC_BROADCAST.
823 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
824 * important when userspace wants to use x2APIC-format MSIs, because
825 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
827 static u32
kvm_apic_mda(struct kvm_vcpu
*vcpu
, unsigned int dest_id
,
828 struct kvm_lapic
*source
, struct kvm_lapic
*target
)
830 bool ipi
= source
!= NULL
;
832 if (!vcpu
->kvm
->arch
.x2apic_broadcast_quirk_disabled
&&
833 !ipi
&& dest_id
== APIC_BROADCAST
&& apic_x2apic_mode(target
))
834 return X2APIC_BROADCAST
;
839 bool kvm_apic_match_dest(struct kvm_vcpu
*vcpu
, struct kvm_lapic
*source
,
840 int shorthand
, unsigned int dest
, int dest_mode
)
842 struct kvm_lapic
*target
= vcpu
->arch
.apic
;
843 u32 mda
= kvm_apic_mda(vcpu
, dest
, source
, target
);
847 case APIC_DEST_NOSHORT
:
848 if (dest_mode
== APIC_DEST_PHYSICAL
)
849 return kvm_apic_match_physical_addr(target
, mda
);
851 return kvm_apic_match_logical_addr(target
, mda
);
853 return target
== source
;
854 case APIC_DEST_ALLINC
:
856 case APIC_DEST_ALLBUT
:
857 return target
!= source
;
862 EXPORT_SYMBOL_GPL(kvm_apic_match_dest
);
864 int kvm_vector_to_index(u32 vector
, u32 dest_vcpus
,
865 const unsigned long *bitmap
, u32 bitmap_size
)
870 mod
= vector
% dest_vcpus
;
872 for (i
= 0; i
<= mod
; i
++) {
873 idx
= find_next_bit(bitmap
, bitmap_size
, idx
+ 1);
874 BUG_ON(idx
== bitmap_size
);
880 static void kvm_apic_disabled_lapic_found(struct kvm
*kvm
)
882 if (!kvm
->arch
.disabled_lapic_found
) {
883 kvm
->arch
.disabled_lapic_found
= true;
885 "Disabled LAPIC found during irq injection\n");
889 static bool kvm_apic_is_broadcast_dest(struct kvm
*kvm
, struct kvm_lapic
**src
,
890 struct kvm_lapic_irq
*irq
, struct kvm_apic_map
*map
)
892 if (kvm
->arch
.x2apic_broadcast_quirk_disabled
) {
893 if ((irq
->dest_id
== APIC_BROADCAST
&&
894 map
->mode
!= KVM_APIC_MODE_X2APIC
))
896 if (irq
->dest_id
== X2APIC_BROADCAST
)
899 bool x2apic_ipi
= src
&& *src
&& apic_x2apic_mode(*src
);
900 if (irq
->dest_id
== (x2apic_ipi
?
901 X2APIC_BROADCAST
: APIC_BROADCAST
))
908 /* Return true if the interrupt can be handled by using *bitmap as index mask
909 * for valid destinations in *dst array.
910 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
911 * Note: we may have zero kvm_lapic destinations when we return true, which
912 * means that the interrupt should be dropped. In this case, *bitmap would be
913 * zero and *dst undefined.
915 static inline bool kvm_apic_map_get_dest_lapic(struct kvm
*kvm
,
916 struct kvm_lapic
**src
, struct kvm_lapic_irq
*irq
,
917 struct kvm_apic_map
*map
, struct kvm_lapic
***dst
,
918 unsigned long *bitmap
)
922 if (irq
->shorthand
== APIC_DEST_SELF
&& src
) {
926 } else if (irq
->shorthand
)
929 if (!map
|| kvm_apic_is_broadcast_dest(kvm
, src
, irq
, map
))
932 if (irq
->dest_mode
== APIC_DEST_PHYSICAL
) {
933 if (irq
->dest_id
> map
->max_apic_id
) {
936 u32 dest_id
= array_index_nospec(irq
->dest_id
, map
->max_apic_id
+ 1);
937 *dst
= &map
->phys_map
[dest_id
];
944 if (!kvm_apic_map_get_logical_dest(map
, irq
->dest_id
, dst
,
948 if (!kvm_lowest_prio_delivery(irq
))
951 if (!kvm_vector_hashing_enabled()) {
953 for_each_set_bit(i
, bitmap
, 16) {
958 else if (kvm_apic_compare_prio((*dst
)[i
]->vcpu
,
959 (*dst
)[lowest
]->vcpu
) < 0)
966 lowest
= kvm_vector_to_index(irq
->vector
, hweight16(*bitmap
),
969 if (!(*dst
)[lowest
]) {
970 kvm_apic_disabled_lapic_found(kvm
);
976 *bitmap
= (lowest
>= 0) ? 1 << lowest
: 0;
981 bool kvm_irq_delivery_to_apic_fast(struct kvm
*kvm
, struct kvm_lapic
*src
,
982 struct kvm_lapic_irq
*irq
, int *r
, struct dest_map
*dest_map
)
984 struct kvm_apic_map
*map
;
985 unsigned long bitmap
;
986 struct kvm_lapic
**dst
= NULL
;
992 if (irq
->shorthand
== APIC_DEST_SELF
) {
993 *r
= kvm_apic_set_irq(src
->vcpu
, irq
, dest_map
);
998 map
= rcu_dereference(kvm
->arch
.apic_map
);
1000 ret
= kvm_apic_map_get_dest_lapic(kvm
, &src
, irq
, map
, &dst
, &bitmap
);
1003 for_each_set_bit(i
, &bitmap
, 16) {
1006 *r
+= kvm_apic_set_irq(dst
[i
]->vcpu
, irq
, dest_map
);
1015 * This routine tries to handle interrupts in posted mode, here is how
1016 * it deals with different cases:
1017 * - For single-destination interrupts, handle it in posted mode
1018 * - Else if vector hashing is enabled and it is a lowest-priority
1019 * interrupt, handle it in posted mode and use the following mechanism
1020 * to find the destination vCPU.
1021 * 1. For lowest-priority interrupts, store all the possible
1022 * destination vCPUs in an array.
1023 * 2. Use "guest vector % max number of destination vCPUs" to find
1024 * the right destination vCPU in the array for the lowest-priority
1026 * - Otherwise, use remapped mode to inject the interrupt.
1028 bool kvm_intr_is_single_vcpu_fast(struct kvm
*kvm
, struct kvm_lapic_irq
*irq
,
1029 struct kvm_vcpu
**dest_vcpu
)
1031 struct kvm_apic_map
*map
;
1032 unsigned long bitmap
;
1033 struct kvm_lapic
**dst
= NULL
;
1040 map
= rcu_dereference(kvm
->arch
.apic_map
);
1042 if (kvm_apic_map_get_dest_lapic(kvm
, NULL
, irq
, map
, &dst
, &bitmap
) &&
1043 hweight16(bitmap
) == 1) {
1044 unsigned long i
= find_first_bit(&bitmap
, 16);
1047 *dest_vcpu
= dst
[i
]->vcpu
;
1057 * Add a pending IRQ into lapic.
1058 * Return 1 if successfully added and 0 if discarded.
1060 static int __apic_accept_irq(struct kvm_lapic
*apic
, int delivery_mode
,
1061 int vector
, int level
, int trig_mode
,
1062 struct dest_map
*dest_map
)
1065 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1067 trace_kvm_apic_accept_irq(vcpu
->vcpu_id
, delivery_mode
,
1069 switch (delivery_mode
) {
1070 case APIC_DM_LOWEST
:
1071 vcpu
->arch
.apic_arb_prio
++;
1074 if (unlikely(trig_mode
&& !level
))
1077 /* FIXME add logic for vcpu on reset */
1078 if (unlikely(!apic_enabled(apic
)))
1084 __set_bit(vcpu
->vcpu_id
, dest_map
->map
);
1085 dest_map
->vectors
[vcpu
->vcpu_id
] = vector
;
1088 if (apic_test_vector(vector
, apic
->regs
+ APIC_TMR
) != !!trig_mode
) {
1090 kvm_lapic_set_vector(vector
,
1091 apic
->regs
+ APIC_TMR
);
1093 kvm_lapic_clear_vector(vector
,
1094 apic
->regs
+ APIC_TMR
);
1097 if (static_call(kvm_x86_deliver_posted_interrupt
)(vcpu
, vector
)) {
1098 kvm_lapic_set_irr(vector
, apic
);
1099 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
1100 kvm_vcpu_kick(vcpu
);
1106 vcpu
->arch
.pv
.pv_unhalted
= 1;
1107 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
1108 kvm_vcpu_kick(vcpu
);
1113 kvm_make_request(KVM_REQ_SMI
, vcpu
);
1114 kvm_vcpu_kick(vcpu
);
1119 kvm_inject_nmi(vcpu
);
1120 kvm_vcpu_kick(vcpu
);
1124 if (!trig_mode
|| level
) {
1126 /* assumes that there are only KVM_APIC_INIT/SIPI */
1127 apic
->pending_events
= (1UL << KVM_APIC_INIT
);
1128 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
1129 kvm_vcpu_kick(vcpu
);
1133 case APIC_DM_STARTUP
:
1135 apic
->sipi_vector
= vector
;
1136 /* make sure sipi_vector is visible for the receiver */
1138 set_bit(KVM_APIC_SIPI
, &apic
->pending_events
);
1139 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
1140 kvm_vcpu_kick(vcpu
);
1143 case APIC_DM_EXTINT
:
1145 * Should only be called by kvm_apic_local_deliver() with LVT0,
1146 * before NMI watchdog was enabled. Already handled by
1147 * kvm_apic_accept_pic_intr().
1152 printk(KERN_ERR
"TODO: unsupported delivery mode %x\n",
1160 * This routine identifies the destination vcpus mask meant to receive the
1161 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
1162 * out the destination vcpus array and set the bitmap or it traverses to
1163 * each available vcpu to identify the same.
1165 void kvm_bitmap_or_dest_vcpus(struct kvm
*kvm
, struct kvm_lapic_irq
*irq
,
1166 unsigned long *vcpu_bitmap
)
1168 struct kvm_lapic
**dest_vcpu
= NULL
;
1169 struct kvm_lapic
*src
= NULL
;
1170 struct kvm_apic_map
*map
;
1171 struct kvm_vcpu
*vcpu
;
1172 unsigned long bitmap
;
1177 map
= rcu_dereference(kvm
->arch
.apic_map
);
1179 ret
= kvm_apic_map_get_dest_lapic(kvm
, &src
, irq
, map
, &dest_vcpu
,
1182 for_each_set_bit(i
, &bitmap
, 16) {
1185 vcpu_idx
= dest_vcpu
[i
]->vcpu
->vcpu_idx
;
1186 __set_bit(vcpu_idx
, vcpu_bitmap
);
1189 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
1190 if (!kvm_apic_present(vcpu
))
1192 if (!kvm_apic_match_dest(vcpu
, NULL
,
1197 __set_bit(i
, vcpu_bitmap
);
1203 int kvm_apic_compare_prio(struct kvm_vcpu
*vcpu1
, struct kvm_vcpu
*vcpu2
)
1205 return vcpu1
->arch
.apic_arb_prio
- vcpu2
->arch
.apic_arb_prio
;
1208 static bool kvm_ioapic_handles_vector(struct kvm_lapic
*apic
, int vector
)
1210 return test_bit(vector
, apic
->vcpu
->arch
.ioapic_handled_vectors
);
1213 static void kvm_ioapic_send_eoi(struct kvm_lapic
*apic
, int vector
)
1217 /* Eoi the ioapic only if the ioapic doesn't own the vector. */
1218 if (!kvm_ioapic_handles_vector(apic
, vector
))
1221 /* Request a KVM exit to inform the userspace IOAPIC. */
1222 if (irqchip_split(apic
->vcpu
->kvm
)) {
1223 apic
->vcpu
->arch
.pending_ioapic_eoi
= vector
;
1224 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT
, apic
->vcpu
);
1228 if (apic_test_vector(vector
, apic
->regs
+ APIC_TMR
))
1229 trigger_mode
= IOAPIC_LEVEL_TRIG
;
1231 trigger_mode
= IOAPIC_EDGE_TRIG
;
1233 kvm_ioapic_update_eoi(apic
->vcpu
, vector
, trigger_mode
);
1236 static int apic_set_eoi(struct kvm_lapic
*apic
)
1238 int vector
= apic_find_highest_isr(apic
);
1240 trace_kvm_eoi(apic
, vector
);
1243 * Not every write EOI will has corresponding ISR,
1244 * one example is when Kernel check timer on setup_IO_APIC
1249 apic_clear_isr(vector
, apic
);
1250 apic_update_ppr(apic
);
1252 if (to_hv_vcpu(apic
->vcpu
) &&
1253 test_bit(vector
, to_hv_synic(apic
->vcpu
)->vec_bitmap
))
1254 kvm_hv_synic_send_eoi(apic
->vcpu
, vector
);
1256 kvm_ioapic_send_eoi(apic
, vector
);
1257 kvm_make_request(KVM_REQ_EVENT
, apic
->vcpu
);
1262 * this interface assumes a trap-like exit, which has already finished
1263 * desired side effect including vISR and vPPR update.
1265 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu
*vcpu
, int vector
)
1267 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1269 trace_kvm_eoi(apic
, vector
);
1271 kvm_ioapic_send_eoi(apic
, vector
);
1272 kvm_make_request(KVM_REQ_EVENT
, apic
->vcpu
);
1274 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated
);
1276 void kvm_apic_send_ipi(struct kvm_lapic
*apic
, u32 icr_low
, u32 icr_high
)
1278 struct kvm_lapic_irq irq
;
1280 irq
.vector
= icr_low
& APIC_VECTOR_MASK
;
1281 irq
.delivery_mode
= icr_low
& APIC_MODE_MASK
;
1282 irq
.dest_mode
= icr_low
& APIC_DEST_MASK
;
1283 irq
.level
= (icr_low
& APIC_INT_ASSERT
) != 0;
1284 irq
.trig_mode
= icr_low
& APIC_INT_LEVELTRIG
;
1285 irq
.shorthand
= icr_low
& APIC_SHORT_MASK
;
1286 irq
.msi_redir_hint
= false;
1287 if (apic_x2apic_mode(apic
))
1288 irq
.dest_id
= icr_high
;
1290 irq
.dest_id
= GET_APIC_DEST_FIELD(icr_high
);
1292 trace_kvm_apic_ipi(icr_low
, irq
.dest_id
);
1294 kvm_irq_delivery_to_apic(apic
->vcpu
->kvm
, apic
, &irq
, NULL
);
1297 static u32
apic_get_tmcct(struct kvm_lapic
*apic
)
1299 ktime_t remaining
, now
;
1303 ASSERT(apic
!= NULL
);
1305 /* if initial count is 0, current count should also be 0 */
1306 if (kvm_lapic_get_reg(apic
, APIC_TMICT
) == 0 ||
1307 apic
->lapic_timer
.period
== 0)
1311 remaining
= ktime_sub(apic
->lapic_timer
.target_expiration
, now
);
1312 if (ktime_to_ns(remaining
) < 0)
1315 ns
= mod_64(ktime_to_ns(remaining
), apic
->lapic_timer
.period
);
1316 tmcct
= div64_u64(ns
,
1317 (APIC_BUS_CYCLE_NS
* apic
->divide_count
));
1322 static void __report_tpr_access(struct kvm_lapic
*apic
, bool write
)
1324 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1325 struct kvm_run
*run
= vcpu
->run
;
1327 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS
, vcpu
);
1328 run
->tpr_access
.rip
= kvm_rip_read(vcpu
);
1329 run
->tpr_access
.is_write
= write
;
1332 static inline void report_tpr_access(struct kvm_lapic
*apic
, bool write
)
1334 if (apic
->vcpu
->arch
.tpr_access_reporting
)
1335 __report_tpr_access(apic
, write
);
1338 static u32
__apic_read(struct kvm_lapic
*apic
, unsigned int offset
)
1342 if (offset
>= LAPIC_MMIO_LENGTH
)
1349 case APIC_TMCCT
: /* Timer CCR */
1350 if (apic_lvtt_tscdeadline(apic
))
1353 val
= apic_get_tmcct(apic
);
1356 apic_update_ppr(apic
);
1357 val
= kvm_lapic_get_reg(apic
, offset
);
1360 report_tpr_access(apic
, false);
1363 val
= kvm_lapic_get_reg(apic
, offset
);
1370 static inline struct kvm_lapic
*to_lapic(struct kvm_io_device
*dev
)
1372 return container_of(dev
, struct kvm_lapic
, dev
);
1375 #define APIC_REG_MASK(reg) (1ull << ((reg) >> 4))
1376 #define APIC_REGS_MASK(first, count) \
1377 (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
1379 int kvm_lapic_reg_read(struct kvm_lapic
*apic
, u32 offset
, int len
,
1382 unsigned char alignment
= offset
& 0xf;
1384 /* this bitmask has a bit cleared for each reserved register */
1385 u64 valid_reg_mask
=
1386 APIC_REG_MASK(APIC_ID
) |
1387 APIC_REG_MASK(APIC_LVR
) |
1388 APIC_REG_MASK(APIC_TASKPRI
) |
1389 APIC_REG_MASK(APIC_PROCPRI
) |
1390 APIC_REG_MASK(APIC_LDR
) |
1391 APIC_REG_MASK(APIC_DFR
) |
1392 APIC_REG_MASK(APIC_SPIV
) |
1393 APIC_REGS_MASK(APIC_ISR
, APIC_ISR_NR
) |
1394 APIC_REGS_MASK(APIC_TMR
, APIC_ISR_NR
) |
1395 APIC_REGS_MASK(APIC_IRR
, APIC_ISR_NR
) |
1396 APIC_REG_MASK(APIC_ESR
) |
1397 APIC_REG_MASK(APIC_ICR
) |
1398 APIC_REG_MASK(APIC_ICR2
) |
1399 APIC_REG_MASK(APIC_LVTT
) |
1400 APIC_REG_MASK(APIC_LVTTHMR
) |
1401 APIC_REG_MASK(APIC_LVTPC
) |
1402 APIC_REG_MASK(APIC_LVT0
) |
1403 APIC_REG_MASK(APIC_LVT1
) |
1404 APIC_REG_MASK(APIC_LVTERR
) |
1405 APIC_REG_MASK(APIC_TMICT
) |
1406 APIC_REG_MASK(APIC_TMCCT
) |
1407 APIC_REG_MASK(APIC_TDCR
);
1409 /* ARBPRI is not valid on x2APIC */
1410 if (!apic_x2apic_mode(apic
))
1411 valid_reg_mask
|= APIC_REG_MASK(APIC_ARBPRI
);
1413 if (alignment
+ len
> 4)
1416 if (offset
> 0x3f0 || !(valid_reg_mask
& APIC_REG_MASK(offset
)))
1419 result
= __apic_read(apic
, offset
& ~0xf);
1421 trace_kvm_apic_read(offset
, result
);
1427 memcpy(data
, (char *)&result
+ alignment
, len
);
1430 printk(KERN_ERR
"Local APIC read with len = %x, "
1431 "should be 1,2, or 4 instead\n", len
);
1436 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read
);
1438 static int apic_mmio_in_range(struct kvm_lapic
*apic
, gpa_t addr
)
1440 return addr
>= apic
->base_address
&&
1441 addr
< apic
->base_address
+ LAPIC_MMIO_LENGTH
;
1444 static int apic_mmio_read(struct kvm_vcpu
*vcpu
, struct kvm_io_device
*this,
1445 gpa_t address
, int len
, void *data
)
1447 struct kvm_lapic
*apic
= to_lapic(this);
1448 u32 offset
= address
- apic
->base_address
;
1450 if (!apic_mmio_in_range(apic
, address
))
1453 if (!kvm_apic_hw_enabled(apic
) || apic_x2apic_mode(apic
)) {
1454 if (!kvm_check_has_quirk(vcpu
->kvm
,
1455 KVM_X86_QUIRK_LAPIC_MMIO_HOLE
))
1458 memset(data
, 0xff, len
);
1462 kvm_lapic_reg_read(apic
, offset
, len
, data
);
1467 static void update_divide_count(struct kvm_lapic
*apic
)
1469 u32 tmp1
, tmp2
, tdcr
;
1471 tdcr
= kvm_lapic_get_reg(apic
, APIC_TDCR
);
1473 tmp2
= ((tmp1
& 0x3) | ((tmp1
& 0x8) >> 1)) + 1;
1474 apic
->divide_count
= 0x1 << (tmp2
& 0x7);
1477 static void limit_periodic_timer_frequency(struct kvm_lapic
*apic
)
1480 * Do not allow the guest to program periodic timers with small
1481 * interval, since the hrtimers are not throttled by the host
1484 if (apic_lvtt_period(apic
) && apic
->lapic_timer
.period
) {
1485 s64 min_period
= min_timer_period_us
* 1000LL;
1487 if (apic
->lapic_timer
.period
< min_period
) {
1488 pr_info_ratelimited(
1489 "kvm: vcpu %i: requested %lld ns "
1490 "lapic timer period limited to %lld ns\n",
1491 apic
->vcpu
->vcpu_id
,
1492 apic
->lapic_timer
.period
, min_period
);
1493 apic
->lapic_timer
.period
= min_period
;
1498 static void cancel_hv_timer(struct kvm_lapic
*apic
);
1500 static void cancel_apic_timer(struct kvm_lapic
*apic
)
1502 hrtimer_cancel(&apic
->lapic_timer
.timer
);
1504 if (apic
->lapic_timer
.hv_timer_in_use
)
1505 cancel_hv_timer(apic
);
1509 static void apic_update_lvtt(struct kvm_lapic
*apic
)
1511 u32 timer_mode
= kvm_lapic_get_reg(apic
, APIC_LVTT
) &
1512 apic
->lapic_timer
.timer_mode_mask
;
1514 if (apic
->lapic_timer
.timer_mode
!= timer_mode
) {
1515 if (apic_lvtt_tscdeadline(apic
) != (timer_mode
==
1516 APIC_LVT_TIMER_TSCDEADLINE
)) {
1517 cancel_apic_timer(apic
);
1518 kvm_lapic_set_reg(apic
, APIC_TMICT
, 0);
1519 apic
->lapic_timer
.period
= 0;
1520 apic
->lapic_timer
.tscdeadline
= 0;
1522 apic
->lapic_timer
.timer_mode
= timer_mode
;
1523 limit_periodic_timer_frequency(apic
);
1528 * On APICv, this test will cause a busy wait
1529 * during a higher-priority task.
1532 static bool lapic_timer_int_injected(struct kvm_vcpu
*vcpu
)
1534 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1535 u32 reg
= kvm_lapic_get_reg(apic
, APIC_LVTT
);
1537 if (kvm_apic_hw_enabled(apic
)) {
1538 int vec
= reg
& APIC_VECTOR_MASK
;
1539 void *bitmap
= apic
->regs
+ APIC_ISR
;
1541 if (vcpu
->arch
.apicv_active
)
1542 bitmap
= apic
->regs
+ APIC_IRR
;
1544 if (apic_test_vector(vec
, bitmap
))
1550 static inline void __wait_lapic_expire(struct kvm_vcpu
*vcpu
, u64 guest_cycles
)
1552 u64 timer_advance_ns
= vcpu
->arch
.apic
->lapic_timer
.timer_advance_ns
;
1555 * If the guest TSC is running at a different ratio than the host, then
1556 * convert the delay to nanoseconds to achieve an accurate delay. Note
1557 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1558 * always for VMX enabled hardware.
1560 if (vcpu
->arch
.tsc_scaling_ratio
== kvm_default_tsc_scaling_ratio
) {
1561 __delay(min(guest_cycles
,
1562 nsec_to_cycles(vcpu
, timer_advance_ns
)));
1564 u64 delay_ns
= guest_cycles
* 1000000ULL;
1565 do_div(delay_ns
, vcpu
->arch
.virtual_tsc_khz
);
1566 ndelay(min_t(u32
, delay_ns
, timer_advance_ns
));
1570 static inline void adjust_lapic_timer_advance(struct kvm_vcpu
*vcpu
,
1571 s64 advance_expire_delta
)
1573 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1574 u32 timer_advance_ns
= apic
->lapic_timer
.timer_advance_ns
;
1577 /* Do not adjust for tiny fluctuations or large random spikes. */
1578 if (abs(advance_expire_delta
) > LAPIC_TIMER_ADVANCE_ADJUST_MAX
||
1579 abs(advance_expire_delta
) < LAPIC_TIMER_ADVANCE_ADJUST_MIN
)
1583 if (advance_expire_delta
< 0) {
1584 ns
= -advance_expire_delta
* 1000000ULL;
1585 do_div(ns
, vcpu
->arch
.virtual_tsc_khz
);
1586 timer_advance_ns
-= ns
/LAPIC_TIMER_ADVANCE_ADJUST_STEP
;
1589 ns
= advance_expire_delta
* 1000000ULL;
1590 do_div(ns
, vcpu
->arch
.virtual_tsc_khz
);
1591 timer_advance_ns
+= ns
/LAPIC_TIMER_ADVANCE_ADJUST_STEP
;
1594 if (unlikely(timer_advance_ns
> LAPIC_TIMER_ADVANCE_NS_MAX
))
1595 timer_advance_ns
= LAPIC_TIMER_ADVANCE_NS_INIT
;
1596 apic
->lapic_timer
.timer_advance_ns
= timer_advance_ns
;
1599 static void __kvm_wait_lapic_expire(struct kvm_vcpu
*vcpu
)
1601 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1602 u64 guest_tsc
, tsc_deadline
;
1604 tsc_deadline
= apic
->lapic_timer
.expired_tscdeadline
;
1605 apic
->lapic_timer
.expired_tscdeadline
= 0;
1606 guest_tsc
= kvm_read_l1_tsc(vcpu
, rdtsc());
1607 apic
->lapic_timer
.advance_expire_delta
= guest_tsc
- tsc_deadline
;
1609 if (lapic_timer_advance_dynamic
) {
1610 adjust_lapic_timer_advance(vcpu
, apic
->lapic_timer
.advance_expire_delta
);
1612 * If the timer fired early, reread the TSC to account for the
1613 * overhead of the above adjustment to avoid waiting longer
1614 * than is necessary.
1616 if (guest_tsc
< tsc_deadline
)
1617 guest_tsc
= kvm_read_l1_tsc(vcpu
, rdtsc());
1620 if (guest_tsc
< tsc_deadline
)
1621 __wait_lapic_expire(vcpu
, tsc_deadline
- guest_tsc
);
1624 void kvm_wait_lapic_expire(struct kvm_vcpu
*vcpu
)
1626 if (lapic_in_kernel(vcpu
) &&
1627 vcpu
->arch
.apic
->lapic_timer
.expired_tscdeadline
&&
1628 vcpu
->arch
.apic
->lapic_timer
.timer_advance_ns
&&
1629 lapic_timer_int_injected(vcpu
))
1630 __kvm_wait_lapic_expire(vcpu
);
1632 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire
);
1634 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic
*apic
)
1636 struct kvm_timer
*ktimer
= &apic
->lapic_timer
;
1638 kvm_apic_local_deliver(apic
, APIC_LVTT
);
1639 if (apic_lvtt_tscdeadline(apic
)) {
1640 ktimer
->tscdeadline
= 0;
1641 } else if (apic_lvtt_oneshot(apic
)) {
1642 ktimer
->tscdeadline
= 0;
1643 ktimer
->target_expiration
= 0;
1647 static void apic_timer_expired(struct kvm_lapic
*apic
, bool from_timer_fn
)
1649 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1650 struct kvm_timer
*ktimer
= &apic
->lapic_timer
;
1652 if (atomic_read(&apic
->lapic_timer
.pending
))
1655 if (apic_lvtt_tscdeadline(apic
) || ktimer
->hv_timer_in_use
)
1656 ktimer
->expired_tscdeadline
= ktimer
->tscdeadline
;
1658 if (!from_timer_fn
&& vcpu
->arch
.apicv_active
) {
1659 WARN_ON(kvm_get_running_vcpu() != vcpu
);
1660 kvm_apic_inject_pending_timer_irqs(apic
);
1664 if (kvm_use_posted_timer_interrupt(apic
->vcpu
)) {
1666 * Ensure the guest's timer has truly expired before posting an
1667 * interrupt. Open code the relevant checks to avoid querying
1668 * lapic_timer_int_injected(), which will be false since the
1669 * interrupt isn't yet injected. Waiting until after injecting
1670 * is not an option since that won't help a posted interrupt.
1672 if (vcpu
->arch
.apic
->lapic_timer
.expired_tscdeadline
&&
1673 vcpu
->arch
.apic
->lapic_timer
.timer_advance_ns
)
1674 __kvm_wait_lapic_expire(vcpu
);
1675 kvm_apic_inject_pending_timer_irqs(apic
);
1679 atomic_inc(&apic
->lapic_timer
.pending
);
1680 kvm_make_request(KVM_REQ_UNBLOCK
, vcpu
);
1682 kvm_vcpu_kick(vcpu
);
1685 static void start_sw_tscdeadline(struct kvm_lapic
*apic
)
1687 struct kvm_timer
*ktimer
= &apic
->lapic_timer
;
1688 u64 guest_tsc
, tscdeadline
= ktimer
->tscdeadline
;
1691 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1692 unsigned long this_tsc_khz
= vcpu
->arch
.virtual_tsc_khz
;
1693 unsigned long flags
;
1696 if (unlikely(!tscdeadline
|| !this_tsc_khz
))
1699 local_irq_save(flags
);
1702 guest_tsc
= kvm_read_l1_tsc(vcpu
, rdtsc());
1704 ns
= (tscdeadline
- guest_tsc
) * 1000000ULL;
1705 do_div(ns
, this_tsc_khz
);
1707 if (likely(tscdeadline
> guest_tsc
) &&
1708 likely(ns
> apic
->lapic_timer
.timer_advance_ns
)) {
1709 expire
= ktime_add_ns(now
, ns
);
1710 expire
= ktime_sub_ns(expire
, ktimer
->timer_advance_ns
);
1711 hrtimer_start(&ktimer
->timer
, expire
, HRTIMER_MODE_ABS_HARD
);
1713 apic_timer_expired(apic
, false);
1715 local_irq_restore(flags
);
1718 static inline u64
tmict_to_ns(struct kvm_lapic
*apic
, u32 tmict
)
1720 return (u64
)tmict
* APIC_BUS_CYCLE_NS
* (u64
)apic
->divide_count
;
1723 static void update_target_expiration(struct kvm_lapic
*apic
, uint32_t old_divisor
)
1725 ktime_t now
, remaining
;
1726 u64 ns_remaining_old
, ns_remaining_new
;
1728 apic
->lapic_timer
.period
=
1729 tmict_to_ns(apic
, kvm_lapic_get_reg(apic
, APIC_TMICT
));
1730 limit_periodic_timer_frequency(apic
);
1733 remaining
= ktime_sub(apic
->lapic_timer
.target_expiration
, now
);
1734 if (ktime_to_ns(remaining
) < 0)
1737 ns_remaining_old
= ktime_to_ns(remaining
);
1738 ns_remaining_new
= mul_u64_u32_div(ns_remaining_old
,
1739 apic
->divide_count
, old_divisor
);
1741 apic
->lapic_timer
.tscdeadline
+=
1742 nsec_to_cycles(apic
->vcpu
, ns_remaining_new
) -
1743 nsec_to_cycles(apic
->vcpu
, ns_remaining_old
);
1744 apic
->lapic_timer
.target_expiration
= ktime_add_ns(now
, ns_remaining_new
);
1747 static bool set_target_expiration(struct kvm_lapic
*apic
, u32 count_reg
)
1754 apic
->lapic_timer
.period
=
1755 tmict_to_ns(apic
, kvm_lapic_get_reg(apic
, APIC_TMICT
));
1757 if (!apic
->lapic_timer
.period
) {
1758 apic
->lapic_timer
.tscdeadline
= 0;
1762 limit_periodic_timer_frequency(apic
);
1763 deadline
= apic
->lapic_timer
.period
;
1765 if (apic_lvtt_period(apic
) || apic_lvtt_oneshot(apic
)) {
1766 if (unlikely(count_reg
!= APIC_TMICT
)) {
1767 deadline
= tmict_to_ns(apic
,
1768 kvm_lapic_get_reg(apic
, count_reg
));
1769 if (unlikely(deadline
<= 0))
1770 deadline
= apic
->lapic_timer
.period
;
1771 else if (unlikely(deadline
> apic
->lapic_timer
.period
)) {
1772 pr_info_ratelimited(
1773 "kvm: vcpu %i: requested lapic timer restore with "
1774 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1775 "Using initial count to start timer.\n",
1776 apic
->vcpu
->vcpu_id
,
1778 kvm_lapic_get_reg(apic
, count_reg
),
1779 deadline
, apic
->lapic_timer
.period
);
1780 kvm_lapic_set_reg(apic
, count_reg
, 0);
1781 deadline
= apic
->lapic_timer
.period
;
1786 apic
->lapic_timer
.tscdeadline
= kvm_read_l1_tsc(apic
->vcpu
, tscl
) +
1787 nsec_to_cycles(apic
->vcpu
, deadline
);
1788 apic
->lapic_timer
.target_expiration
= ktime_add_ns(now
, deadline
);
1793 static void advance_periodic_target_expiration(struct kvm_lapic
*apic
)
1795 ktime_t now
= ktime_get();
1800 * Synchronize both deadlines to the same time source or
1801 * differences in the periods (caused by differences in the
1802 * underlying clocks or numerical approximation errors) will
1803 * cause the two to drift apart over time as the errors
1806 apic
->lapic_timer
.target_expiration
=
1807 ktime_add_ns(apic
->lapic_timer
.target_expiration
,
1808 apic
->lapic_timer
.period
);
1809 delta
= ktime_sub(apic
->lapic_timer
.target_expiration
, now
);
1810 apic
->lapic_timer
.tscdeadline
= kvm_read_l1_tsc(apic
->vcpu
, tscl
) +
1811 nsec_to_cycles(apic
->vcpu
, delta
);
1814 static void start_sw_period(struct kvm_lapic
*apic
)
1816 if (!apic
->lapic_timer
.period
)
1819 if (ktime_after(ktime_get(),
1820 apic
->lapic_timer
.target_expiration
)) {
1821 apic_timer_expired(apic
, false);
1823 if (apic_lvtt_oneshot(apic
))
1826 advance_periodic_target_expiration(apic
);
1829 hrtimer_start(&apic
->lapic_timer
.timer
,
1830 apic
->lapic_timer
.target_expiration
,
1831 HRTIMER_MODE_ABS_HARD
);
1834 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu
*vcpu
)
1836 if (!lapic_in_kernel(vcpu
))
1839 return vcpu
->arch
.apic
->lapic_timer
.hv_timer_in_use
;
1841 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use
);
1843 static void cancel_hv_timer(struct kvm_lapic
*apic
)
1845 WARN_ON(preemptible());
1846 WARN_ON(!apic
->lapic_timer
.hv_timer_in_use
);
1847 static_call(kvm_x86_cancel_hv_timer
)(apic
->vcpu
);
1848 apic
->lapic_timer
.hv_timer_in_use
= false;
1851 static bool start_hv_timer(struct kvm_lapic
*apic
)
1853 struct kvm_timer
*ktimer
= &apic
->lapic_timer
;
1854 struct kvm_vcpu
*vcpu
= apic
->vcpu
;
1857 WARN_ON(preemptible());
1858 if (!kvm_can_use_hv_timer(vcpu
))
1861 if (!ktimer
->tscdeadline
)
1864 if (static_call(kvm_x86_set_hv_timer
)(vcpu
, ktimer
->tscdeadline
, &expired
))
1867 ktimer
->hv_timer_in_use
= true;
1868 hrtimer_cancel(&ktimer
->timer
);
1871 * To simplify handling the periodic timer, leave the hv timer running
1872 * even if the deadline timer has expired, i.e. rely on the resulting
1873 * VM-Exit to recompute the periodic timer's target expiration.
1875 if (!apic_lvtt_period(apic
)) {
1877 * Cancel the hv timer if the sw timer fired while the hv timer
1878 * was being programmed, or if the hv timer itself expired.
1880 if (atomic_read(&ktimer
->pending
)) {
1881 cancel_hv_timer(apic
);
1882 } else if (expired
) {
1883 apic_timer_expired(apic
, false);
1884 cancel_hv_timer(apic
);
1888 trace_kvm_hv_timer_state(vcpu
->vcpu_id
, ktimer
->hv_timer_in_use
);
1893 static void start_sw_timer(struct kvm_lapic
*apic
)
1895 struct kvm_timer
*ktimer
= &apic
->lapic_timer
;
1897 WARN_ON(preemptible());
1898 if (apic
->lapic_timer
.hv_timer_in_use
)
1899 cancel_hv_timer(apic
);
1900 if (!apic_lvtt_period(apic
) && atomic_read(&ktimer
->pending
))
1903 if (apic_lvtt_period(apic
) || apic_lvtt_oneshot(apic
))
1904 start_sw_period(apic
);
1905 else if (apic_lvtt_tscdeadline(apic
))
1906 start_sw_tscdeadline(apic
);
1907 trace_kvm_hv_timer_state(apic
->vcpu
->vcpu_id
, false);
1910 static void restart_apic_timer(struct kvm_lapic
*apic
)
1914 if (!apic_lvtt_period(apic
) && atomic_read(&apic
->lapic_timer
.pending
))
1917 if (!start_hv_timer(apic
))
1918 start_sw_timer(apic
);
1923 void kvm_lapic_expired_hv_timer(struct kvm_vcpu
*vcpu
)
1925 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1928 /* If the preempt notifier has already run, it also called apic_timer_expired */
1929 if (!apic
->lapic_timer
.hv_timer_in_use
)
1931 WARN_ON(rcuwait_active(&vcpu
->wait
));
1932 apic_timer_expired(apic
, false);
1933 cancel_hv_timer(apic
);
1935 if (apic_lvtt_period(apic
) && apic
->lapic_timer
.period
) {
1936 advance_periodic_target_expiration(apic
);
1937 restart_apic_timer(apic
);
1942 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer
);
1944 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu
*vcpu
)
1946 restart_apic_timer(vcpu
->arch
.apic
);
1948 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer
);
1950 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu
*vcpu
)
1952 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1955 /* Possibly the TSC deadline timer is not enabled yet */
1956 if (apic
->lapic_timer
.hv_timer_in_use
)
1957 start_sw_timer(apic
);
1960 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer
);
1962 void kvm_lapic_restart_hv_timer(struct kvm_vcpu
*vcpu
)
1964 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
1966 WARN_ON(!apic
->lapic_timer
.hv_timer_in_use
);
1967 restart_apic_timer(apic
);
1970 static void __start_apic_timer(struct kvm_lapic
*apic
, u32 count_reg
)
1972 atomic_set(&apic
->lapic_timer
.pending
, 0);
1974 if ((apic_lvtt_period(apic
) || apic_lvtt_oneshot(apic
))
1975 && !set_target_expiration(apic
, count_reg
))
1978 restart_apic_timer(apic
);
1981 static void start_apic_timer(struct kvm_lapic
*apic
)
1983 __start_apic_timer(apic
, APIC_TMICT
);
1986 static void apic_manage_nmi_watchdog(struct kvm_lapic
*apic
, u32 lvt0_val
)
1988 bool lvt0_in_nmi_mode
= apic_lvt_nmi_mode(lvt0_val
);
1990 if (apic
->lvt0_in_nmi_mode
!= lvt0_in_nmi_mode
) {
1991 apic
->lvt0_in_nmi_mode
= lvt0_in_nmi_mode
;
1992 if (lvt0_in_nmi_mode
) {
1993 atomic_inc(&apic
->vcpu
->kvm
->arch
.vapics_in_nmi_mode
);
1995 atomic_dec(&apic
->vcpu
->kvm
->arch
.vapics_in_nmi_mode
);
1999 int kvm_lapic_reg_write(struct kvm_lapic
*apic
, u32 reg
, u32 val
)
2003 trace_kvm_apic_write(reg
, val
);
2006 case APIC_ID
: /* Local APIC ID */
2007 if (!apic_x2apic_mode(apic
))
2008 kvm_apic_set_xapic_id(apic
, val
>> 24);
2014 report_tpr_access(apic
, true);
2015 apic_set_tpr(apic
, val
& 0xff);
2023 if (!apic_x2apic_mode(apic
))
2024 kvm_apic_set_ldr(apic
, val
& APIC_LDR_MASK
);
2030 if (!apic_x2apic_mode(apic
))
2031 kvm_apic_set_dfr(apic
, val
| 0x0FFFFFFF);
2038 if (kvm_lapic_get_reg(apic
, APIC_LVR
) & APIC_LVR_DIRECTED_EOI
)
2039 mask
|= APIC_SPIV_DIRECTED_EOI
;
2040 apic_set_spiv(apic
, val
& mask
);
2041 if (!(val
& APIC_SPIV_APIC_ENABLED
)) {
2045 for (i
= 0; i
< KVM_APIC_LVT_NUM
; i
++) {
2046 lvt_val
= kvm_lapic_get_reg(apic
,
2047 APIC_LVTT
+ 0x10 * i
);
2048 kvm_lapic_set_reg(apic
, APIC_LVTT
+ 0x10 * i
,
2049 lvt_val
| APIC_LVT_MASKED
);
2051 apic_update_lvtt(apic
);
2052 atomic_set(&apic
->lapic_timer
.pending
, 0);
2058 /* No delay here, so we always clear the pending bit */
2060 kvm_apic_send_ipi(apic
, val
, kvm_lapic_get_reg(apic
, APIC_ICR2
));
2061 kvm_lapic_set_reg(apic
, APIC_ICR
, val
);
2065 if (!apic_x2apic_mode(apic
))
2067 kvm_lapic_set_reg(apic
, APIC_ICR2
, val
);
2071 apic_manage_nmi_watchdog(apic
, val
);
2077 /* TODO: Check vector */
2081 if (!kvm_apic_sw_enabled(apic
))
2082 val
|= APIC_LVT_MASKED
;
2083 size
= ARRAY_SIZE(apic_lvt_mask
);
2084 index
= array_index_nospec(
2085 (reg
- APIC_LVTT
) >> 4, size
);
2086 val
&= apic_lvt_mask
[index
];
2087 kvm_lapic_set_reg(apic
, reg
, val
);
2092 if (!kvm_apic_sw_enabled(apic
))
2093 val
|= APIC_LVT_MASKED
;
2094 val
&= (apic_lvt_mask
[0] | apic
->lapic_timer
.timer_mode_mask
);
2095 kvm_lapic_set_reg(apic
, APIC_LVTT
, val
);
2096 apic_update_lvtt(apic
);
2100 if (apic_lvtt_tscdeadline(apic
))
2103 cancel_apic_timer(apic
);
2104 kvm_lapic_set_reg(apic
, APIC_TMICT
, val
);
2105 start_apic_timer(apic
);
2109 uint32_t old_divisor
= apic
->divide_count
;
2111 kvm_lapic_set_reg(apic
, APIC_TDCR
, val
& 0xb);
2112 update_divide_count(apic
);
2113 if (apic
->divide_count
!= old_divisor
&&
2114 apic
->lapic_timer
.period
) {
2115 hrtimer_cancel(&apic
->lapic_timer
.timer
);
2116 update_target_expiration(apic
, old_divisor
);
2117 restart_apic_timer(apic
);
2122 if (apic_x2apic_mode(apic
) && val
!= 0)
2127 if (apic_x2apic_mode(apic
)) {
2128 kvm_lapic_reg_write(apic
, APIC_ICR
,
2129 APIC_DEST_SELF
| (val
& APIC_VECTOR_MASK
));
2138 kvm_recalculate_apic_map(apic
->vcpu
->kvm
);
2142 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write
);
2144 static int apic_mmio_write(struct kvm_vcpu
*vcpu
, struct kvm_io_device
*this,
2145 gpa_t address
, int len
, const void *data
)
2147 struct kvm_lapic
*apic
= to_lapic(this);
2148 unsigned int offset
= address
- apic
->base_address
;
2151 if (!apic_mmio_in_range(apic
, address
))
2154 if (!kvm_apic_hw_enabled(apic
) || apic_x2apic_mode(apic
)) {
2155 if (!kvm_check_has_quirk(vcpu
->kvm
,
2156 KVM_X86_QUIRK_LAPIC_MMIO_HOLE
))
2163 * APIC register must be aligned on 128-bits boundary.
2164 * 32/64/128 bits registers must be accessed thru 32 bits.
2167 if (len
!= 4 || (offset
& 0xf))
2172 kvm_lapic_reg_write(apic
, offset
& 0xff0, val
);
2177 void kvm_lapic_set_eoi(struct kvm_vcpu
*vcpu
)
2179 kvm_lapic_reg_write(vcpu
->arch
.apic
, APIC_EOI
, 0);
2181 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi
);
2183 /* emulate APIC access in a trap manner */
2184 void kvm_apic_write_nodecode(struct kvm_vcpu
*vcpu
, u32 offset
)
2188 /* hw has done the conditional check and inst decode */
2191 kvm_lapic_reg_read(vcpu
->arch
.apic
, offset
, 4, &val
);
2193 /* TODO: optimize to just emulate side effect w/o one more write */
2194 kvm_lapic_reg_write(vcpu
->arch
.apic
, offset
, val
);
2196 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode
);
2198 void kvm_free_lapic(struct kvm_vcpu
*vcpu
)
2200 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
2202 if (!vcpu
->arch
.apic
)
2205 hrtimer_cancel(&apic
->lapic_timer
.timer
);
2207 if (!(vcpu
->arch
.apic_base
& MSR_IA32_APICBASE_ENABLE
))
2208 static_branch_slow_dec_deferred(&apic_hw_disabled
);
2210 if (!apic
->sw_enabled
)
2211 static_branch_slow_dec_deferred(&apic_sw_disabled
);
2214 free_page((unsigned long)apic
->regs
);
2220 *----------------------------------------------------------------------
2222 *----------------------------------------------------------------------
2224 u64
kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu
*vcpu
)
2226 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
2228 if (!kvm_apic_present(vcpu
) || !apic_lvtt_tscdeadline(apic
))
2231 return apic
->lapic_timer
.tscdeadline
;
2234 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu
*vcpu
, u64 data
)
2236 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
2238 if (!kvm_apic_present(vcpu
) || !apic_lvtt_tscdeadline(apic
))
2241 hrtimer_cancel(&apic
->lapic_timer
.timer
);
2242 apic
->lapic_timer
.tscdeadline
= data
;
2243 start_apic_timer(apic
);
2246 void kvm_lapic_set_tpr(struct kvm_vcpu
*vcpu
, unsigned long cr8
)
2248 struct kvm_lapic
*apic
= vcpu
->arch
.apic
;
2250 apic_set_tpr(apic
, ((cr8
& 0x0f) << 4)
2251 | (kvm_lapic_get_reg(apic
, APIC_TASKPRI
) & 4));
2254 u64
kvm_lapic_get_cr8(struct kvm_vcpu
*vcpu
)
2258 tpr
= (u64
) kvm_lapic_get_reg(vcpu
->arch
.apic
, APIC_TASKPRI
);
2260 return (tpr
& 0xf0) >> 4;
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic)
		value |= MSR_IA32_APICBASE_BSP;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");
}
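
/*
 * Illustrative layout (not in the original source) of IA32_APIC_BASE
 * as handled above: bit 8 = BSP, bit 10 = x2APIC enable, bit 11 =
 * xAPIC global enable, bits 12 and up = base address.  The reset value
 * 0xfee00900 on the BSP thus decodes to base 0xfee00000 with both the
 * BSP and enable bits set.
 */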
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vcpu->arch.apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		apic->irr_pending = (apic_search_irr(apic) != -1);
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
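
/*
 * Illustrative note (not in the original source): with APICv the CPU
 * updates IRR/ISR behind KVM's back, so the software hints cannot be
 * trusted as an optimization.  Pinning irr_pending to true forces
 * apic_find_highest_irr() to always scan, and isr_count == 1 keeps
 * apic_find_highest_isr() conservative without recounting ISR bits.
 */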
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event) {
		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
		                   MSR_IA32_APICBASE_ENABLE);
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (vcpu->arch.apicv_active) {
		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}
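
/*
 * Illustrative note (not in the original source): init_event
 * distinguishes an INIT signal from a power-up RESET.  Per the SDM,
 * INIT leaves IA32_APIC_BASE untouched, which is why the base and
 * xAPIC ID are only rewritten on the !init_event (RESET) path above.
 */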
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */
static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					 NULL);
	}
	return 0;
}
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}
static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	}

	return HRTIMER_NORESTART;
}
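
/*
 * Illustrative note (not in the original source): for a periodic timer
 * programmed with, say, a 1 ms period, each expiry bumps the hrtimer by
 * ktimer->period (1,000,000 ns) and returns HRTIMER_RESTART so the
 * hrtimer core re-enqueues it; one-shot and TSC-deadline timers return
 * HRTIMER_NORESTART and stay disarmed until reprogrammed.
 */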
int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}
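
/*
 * Worked example (not in the original source), assuming
 * kvm_apic_calc_x2apic_ldr() computes the architectural
 * ((id >> 4) << 16) | (1 << (id & 0xf)): an x2APIC ID of 0x23 lives in
 * cluster 2 with logical bit 3, giving an LDR of 0x20008.
 */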
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call(kvm_x86_hwapic_irr_update)(vcpu,
				apic_find_highest_irr(apic));
		static_call(kvm_x86_hwapic_isr_update)(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}
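
/*
 * Illustrative note (not in the original source): userspace restores
 * saved state through the KVM_SET_LAPIC ioctl, which lands here.  The
 * base and SPIV values are pushed through their setters before the
 * bulk memcpy() so the apic_hw_disabled/apic_sw_disabled static-key
 * accounting sees the enable-bit transitions it would otherwise miss.
 */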
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
		kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}
/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
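
/*
 * Worked scenario (not in the original source): before entry, KVM sets
 * the guest's KVM_PV_EOI_ENABLED memory bit via pv_eoi_set_pending().
 * If the guest EOIs, it clears that bit instead of writing APIC_EOI,
 * so on the next exit pv_eoi_get_pending() returns false and the code
 * above performs apic_set_eoi() on the guest's behalf; if the bit is
 * still set, the guest never EOI'd and only the clear happens.
 */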
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}
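
/*
 * Worked example (not in the original source) of the 32-bit vAPIC
 * snapshot built above: bits 7:0 hold the TPR, bits 15:8 the high
 * nibble of the highest in-service vector ((max_isr & 0xf0) << 8),
 * and bits 31:24 the highest pending vector.  TPR 0x20, max_isr 0x51
 * and max_irr 0x62 thus encode as 0x62005020.
 */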
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2)
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
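
/*
 * Worked example (not in the original source) of the MSR-to-register
 * mapping used above: x2APIC MSRs start at APIC_BASE_MSR (0x800) and
 * each MSR covers one 16-byte register, so MSR 0x808 maps to
 * (0x808 - 0x800) << 4 == 0x80, i.e. APIC_TASKPRI.  The 64-bit ICR is
 * the exception and is split across APIC_ICR and APIC_ICR2.
 */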
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
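
/*
 * Illustrative note (not in the original source): the value written to
 * the PV EOI MSR packs the enable flag into bit 0 (KVM_MSR_ENABLED)
 * and the guest-physical address of the 4-byte-aligned EOI word into
 * the remaining bits; e.g. data == 0x12345674 | 1 enables PV EOI with
 * the word at GPA 0x12345674.
 */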
int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu))
		return 0;

	/*
	 * Read pending events before calling the check_events
	 * callback.
	 */
	pe = smp_load_acquire(&apic->pending_events);
	if (!pe)
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * If an event has happened and caused a vmexit,
		 * we know INITs are latched and therefore
		 * we will not incorrectly deliver an APIC
		 * event instead of a vmexit.
		 */
	}

	/*
	 * INITs are latched while CPU is in specific states
	 * (SMM, VMX root mode, SVM with GIF=0).
	 * Because a CPU cannot be in these states immediately
	 * after it has processed an INIT signal (and thus in
	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
	 * and leave the INIT pending.
	 */
	if (kvm_vcpu_latch_init(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &pe))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_bit(KVM_APIC_INIT, &pe)) {
		clear_bit(KVM_APIC_INIT, &apic->pending_events);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe)) {
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}
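
/*
 * Illustrative note (not in the original source) of the classic AP
 * bring-up this implements: an INIT moves a non-BSP vCPU to
 * KVM_MP_STATE_INIT_RECEIVED (wait-for-SIPI), and the following SIPI
 * with vector V starts it in real mode at segment base V << 12 via
 * kvm_x86_ops.vcpu_deliver_sipi_vector().
 */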
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}