/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hv.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define APIC_BROADCAST			0xFF
#define X2APIC_BROADCAST		0xFFFFFFFFul
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
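
/*
 * IRR, ISR and TMR each hold 256 bits, one per vector, laid out as eight
 * 32-bit APIC registers spaced 0x10 bytes apart.  VEC_POS() and REG_POS()
 * (as defined in lapic.h) split a vector accordingly: VEC_POS(v) is the
 * bit index within a register (v & 31), and REG_POS(v) is the byte offset
 * of the register ((v >> 5) << 4).  For example, vector 0x31 lives in
 * bit 17 of the second 32-bit bank, at byte offset 0x10.
 */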
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
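
/*
 * Logical destination lookup depends on the addressing mode recorded in
 * the map: xAPIC flat mode uses an 8-bit bitmask (one CPU per LDR bit),
 * xAPIC cluster mode splits the 8-bit LDR into a 4-bit cluster number
 * plus a 4-bit intra-cluster bitmask, and x2APIC uses a 32-bit logical ID
 * made of a 16-bit cluster number and a 16-bit bitmask.
 */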
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[dest_id >> 4];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}
static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;
	u32 max_id = 255;

	mutex_lock(&kvm->arch.apic_map_lock);

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));

	new = kzalloc(sizeof(struct kvm_apic_map) +
	              sizeof(struct kvm_lapic *) * (max_id + 1), GFP_KERNEL);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr, aid;

		if (!kvm_apic_present(vcpu))
			continue;

		aid = kvm_apic_id(apic);
		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (aid <= new->max_apic_id)
			new->phys_map[aid] = apic;

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled) {
			static_key_slow_dec_deferred(&apic_sw_disabled);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	recalculate_apic_map(apic->vcpu->kvm);
}
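
/*
 * In x2APIC mode the logical ID is derived from the APIC ID rather than
 * being software-programmable: the cluster is id >> 4 and the bit within
 * the cluster is id & 0xf.  For example, id 0x27 sits in cluster 0x2 with
 * intra-cluster bit 7, giving LDR = (0x2 << 16) | (1 << 7) = 0x00020080.
 */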
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,			/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,		/* LVT0-1 */
	LVT_MASK			/* LVTERR */
};

static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}
void __kvm_apic_update_irr(u32 *pir, void *regs)
{
	u32 i, pir_val;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
	}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
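
/*
 * The posted-interrupt request (PIR) descriptor mirrors the 256-vector
 * IRR layout as eight 32-bit words.  Each word is atomically exchanged
 * with zero so that bits posted by other CPUs (or by a posting device)
 * after the read are not lost; the harvested bits are OR-ed into vIRR.
 */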
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	__kvm_apic_update_irr(pir, apic->regs);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will be always
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	if (apic->vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* try to update RVI */
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else {
		apic->irr_pending = false;
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an
		 * interrupt.  The highest vector is injected.  Thus the
		 * latest bit set matches the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;
	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

	if (old_ppr != ppr) {
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	}
}
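
/*
 * Per the SDM, PPR is determined by the priority classes (bits 7:4) of
 * TPR and the highest in-service vector: PPR = TPR if TPR's class is at
 * least ISRV's, otherwise the ISRV class with the low nibble cleared.
 * For example, TPR = 0x35 and ISRV = 0x51 yield PPR = 0x50.
 */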
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	if (apic_x2apic_mode(apic))
		return mda == X2APIC_BROADCAST;

	return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_apic_id(apic);

	return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);
	mda = GET_APIC_DEST_FIELD(mda);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		apic_debug("Bad DFR vcpu %d: %08x\n",
			   apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
		return false;
	}
}
/* The KVM APIC implementation has two quirks:
 *  - dest always begins at 0 while the xAPIC MDA has offset 24,
 *  - IOxAPIC messages have to be delivered (directly) to x2APIC.
 */
static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
			struct kvm_lapic *target)
{
	bool ipi = source != NULL;
	bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);

	if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
		return X2APIC_BROADCAST;

	return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int short_hand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(dest, source, target);

	apic_debug("target %p, source %p, dest 0x%x, "
		   "dest_mode 0x%x, short_hand 0x%x\n",
		   target, source, dest, dest_mode, short_hand);

	switch (short_hand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
		       const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
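
/*
 * Vector hashing spreads lowest-priority interrupts deterministically:
 * with N candidate destinations, the (vector % N)-th set bit of the
 * destination bitmap is chosen.  For example, vector 0x26 (= 38) with
 * three destinations selects the set bit at index 38 % 3 = 2, i.e. the
 * third set bit counting from zero.
 */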
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;
	bool x2apic_ipi;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	x2apic_ipi = src && *src && apic_x2apic_mode(*src);
	if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
		return false;

	if (!map)
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			*dst = &map->phys_map[irq->dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);

		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret)
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			if (*r < 0)
				*r = 0;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}

	rcu_read_unlock();
	return ret;
}
/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
			else
				apic_clear_vector(vector, apic->regs + APIC_TMR);
		}

		if (vcpu->arch.apicv_active)
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
		else {
			kvm_lapic_set_irr(vector, apic);

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request
			 */
			smp_wmb();
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		} else {
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
		}
		break;

	case APIC_DM_STARTUP:
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every write to EOI has a corresponding ISR bit set; one
	 * example is when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}
/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
	u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
		   "msi_redir_hint 0x%x\n",
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
		   irq.vector, irq.msi_redir_hint);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
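
/*
 * ICR low-word layout used above: vector in bits 7:0, delivery mode in
 * bits 10:8, destination mode in bit 11, level in bit 14, trigger mode
 * in bit 15 and shorthand in bits 19:18.  The destination comes from
 * ICR2: bits 31:24 in xAPIC mode, the full 32 bits in x2APIC mode.
 */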
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		apic_debug("Access APIC ARBPRI register which is for P6\n");
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	static const u64 rmask = 0x43ff01ffffffe70cULL;

	if ((alignment + len) > 4) {
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
		return 1;
	}

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
		return 1;
	}

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return kvm_apic_hw_enabled(apic) &&
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);

	apic_debug("timer divide count is 0x%x\n",
		   apic->divide_count);
}
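
/*
 * TDCR encodes the divider in bits 0, 1 and 3.  Folding bit 3 down and
 * adding one gives log2 of the divisor, with the all-ones pattern
 * wrapping around to divide-by-1.  For example, tdcr = 0x3 gives
 * tmp2 = 4, i.e. divide_count = 16; tdcr = 0xb gives divide_count = 1.
 */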
static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		apic->lapic_timer.timer_mode = timer_mode;
		hrtimer_cancel(&apic->lapic_timer.timer);
	}
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct swait_queue_head *q = &vcpu->wq;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	atomic_inc(&apic->lapic_timer.pending);
	kvm_set_pending_timer(vcpu);

	if (swait_active(q))
		swake_up(q);

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (vcpu->arch.apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}

void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	if (!lapic_in_kernel(vcpu))
		return;

	if (apic->lapic_timer.expired_tscdeadline == 0)
		return;

	if (!lapic_timer_int_injected(vcpu))
		return;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
	if (guest_tsc < tsc_deadline)
		__delay(tsc_deadline - guest_tsc);
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = apic->lapic_timer.timer.base->get_time();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	if (likely(tscdeadline > guest_tsc)) {
		ns = (tscdeadline - guest_tsc) * 1000000ULL;
		do_div(ns, this_tsc_khz);
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
		hrtimer_start(&apic->lapic_timer.timer,
				expire, HRTIMER_MODE_ABS_PINNED);
	} else
		apic_timer_expired(apic);

	local_irq_restore(flags);
}
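
/*
 * The deadline-to-ns conversion above follows from virtual_tsc_khz being
 * TSC cycles per millisecond: ns = cycles * 1,000,000 / tsc_khz.  For
 * example, a deadline 2,000,000 cycles out on a 2 GHz (2,000,000 kHz)
 * guest clock programs the hrtimer 1,000,000 ns (1 ms) ahead, minus the
 * configured lapic_timer_advance_ns early-wakeup slack.
 */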
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
{
	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	WARN_ON(swait_active(&vcpu->wq));
	cancel_hv_tscdeadline(apic);
	apic_timer_expired(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

static bool start_hv_tscdeadline(struct kvm_lapic *apic)
{
	u64 tscdeadline = apic->lapic_timer.tscdeadline;

	if (atomic_read(&apic->lapic_timer.pending) ||
		kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
		if (apic->lapic_timer.hv_timer_in_use)
			cancel_hv_tscdeadline(apic);
	} else {
		apic->lapic_timer.hv_timer_in_use = true;
		hrtimer_cancel(&apic->lapic_timer.timer);

		/* In case the sw timer triggered in the window */
		if (atomic_read(&apic->lapic_timer.pending))
			cancel_hv_tscdeadline(apic);
	}
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
			apic->lapic_timer.hv_timer_in_use);
	return apic->lapic_timer.hv_timer_in_use;
}

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(apic->lapic_timer.hv_timer_in_use);

	if (apic_lvtt_tscdeadline(apic))
		start_hv_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Possibly the TSC deadline timer is not enabled yet */
	if (!apic->lapic_timer.hv_timer_in_use)
		return;

	cancel_hv_tscdeadline(apic);

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
static void start_apic_timer(struct kvm_lapic *apic)
{
	ktime_t now;

	atomic_set(&apic->lapic_timer.pending, 0);

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		/* lapic timer in oneshot or periodic mode */
		now = apic->lapic_timer.timer.base->get_time();
		apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with small
		 * interval, since the hrtimers are not throttled by the host
		 * scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
		}

		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
			      HRTIMER_MODE_ABS_PINNED);

		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
			   kvm_lapic_get_reg(apic, APIC_TMICT),
			   apic->lapic_timer.period,
			   ktime_to_ns(ktime_add_ns(now,
					apic->lapic_timer.period)));
	} else if (apic_lvtt_tscdeadline(apic)) {
		if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
			start_sw_tscdeadline(apic);
	}
}
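
/*
 * For oneshot/periodic mode the period computed above is
 * TMICT * APIC_BUS_CYCLE_NS * divide_count.  With the bus cycle modeled
 * as 1 ns, an initial count of 1,000,000 and divide-by-16 yields a
 * 16 ms period.
 */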
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}

int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_xapic_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic)) {
			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
				lvt_val = kvm_lapic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);
		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		kvm_lapic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
		kvm_lapic_set_reg(apic, reg, val);

		break;

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		hrtimer_cancel(&apic->lapic_timer.timer);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR:
		apic_debug("KVM_WRITE:TDCR %x\n", val);
		kvm_lapic_set_reg(apic, APIC_TDCR, val);
		update_divide_count(apic);
		break;

	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	/*
	 * APIC registers must be aligned on a 128-bit boundary.
	 * 32/64/128-bit registers must be accessed through 32-bit reads
	 * and writes.  Refer to SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
		return 0;
	}

	val = *(u32*)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_key_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
			apic_lvtt_period(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
		vcpu->arch.apic_base = value;
		return;
	}

	vcpu->arch.apic_base = value;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_key_slow_dec_deferred(&apic_hw_disabled);
		} else
			static_key_slow_inc(&apic_hw_disabled.key);
		recalculate_apic_map(vcpu->kvm);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic;
	int i;

	apic_debug("%s\n", __func__);

	ASSERT(vcpu);
	apic = vcpu->arch.apic;
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event)
		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);

	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	kvm_lapic_set_base(vcpu,
			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);

	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu, false);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!apic_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_lapic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);

	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		apic_clear_isr(vector, apic);
		apic_update_ppr(apic);
	}

	return vector;
}
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);

		if (set)
			*id >>= 24;
		else
			*id <<= 24;
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r)
		return r;
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);

	recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		if (kvm_x86_ops->apicv_post_state_restore)
			kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}
/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the
 * last entry. If yes, set EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu, true);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}