arch/x86/kvm/lapic.c
1
2 /*
3 * Local APIC virtualization
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Dor Laor <dor.laor@qumranet.com>
12 * Gregory Haskins <ghaskins@novell.com>
13 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
14 *
15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 */
20
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
23 #include <linux/mm.h>
24 #include <linux/highmem.h>
25 #include <linux/smp.h>
26 #include <linux/hrtimer.h>
27 #include <linux/io.h>
28 #include <linux/export.h>
29 #include <linux/math64.h>
30 #include <linux/slab.h>
31 #include <asm/processor.h>
32 #include <asm/msr.h>
33 #include <asm/page.h>
34 #include <asm/current.h>
35 #include <asm/apicdef.h>
36 #include <asm/delay.h>
37 #include <linux/atomic.h>
38 #include <linux/jump_label.h>
39 #include "kvm_cache_regs.h"
40 #include "irq.h"
41 #include "trace.h"
42 #include "x86.h"
43 #include "cpuid.h"
44 #include "hyperv.h"
45
46 #ifndef CONFIG_X86_64
47 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
48 #else
49 #define mod_64(x, y) ((x) % (y))
50 #endif
51
52 #define PRId64 "d"
53 #define PRIx64 "llx"
54 #define PRIu64 "u"
55 #define PRIo64 "o"
56
57 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
58 #define apic_debug(fmt, arg...)
59
60 /* 0x14 is the LAPIC version for Xeon and Pentium (SDM 8.4.8) */
61 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
62 #define LAPIC_MMIO_LENGTH (1 << 12)
63 /* The following defines are not in apicdef.h */
64 #define APIC_SHORT_MASK 0xc0000
65 #define APIC_DEST_NOSHORT 0x0
66 #define APIC_DEST_MASK 0x800
67 #define MAX_APIC_VECTOR 256
68 #define APIC_VECTORS_PER_REG 32
69
70 #define APIC_BROADCAST 0xFF
71 #define X2APIC_BROADCAST 0xFFFFFFFFul
72
73 static inline int apic_test_vector(int vec, void *bitmap)
74 {
75 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
76 }
77
78 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
79 {
80 struct kvm_lapic *apic = vcpu->arch.apic;
81
82 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
83 apic_test_vector(vector, apic->regs + APIC_IRR);
84 }
85
86 static inline void apic_clear_vector(int vec, void *bitmap)
87 {
88 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
89 }
90
91 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
92 {
93 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
94 }
95
96 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
97 {
98 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
99 }
100
101 struct static_key_deferred apic_hw_disabled __read_mostly;
102 struct static_key_deferred apic_sw_disabled __read_mostly;
103
104 static inline int apic_enabled(struct kvm_lapic *apic)
105 {
106 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
107 }
108
109 #define LVT_MASK \
110 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
111
112 #define LINT_MASK \
113 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
114 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
115
116 static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
117 {
118 return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
119 }
120
121 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
122 {
123 return apic->vcpu->vcpu_id;
124 }
125
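/*
 * Translate a logical destination ID into a pointer to the cluster of
 * candidate LAPICs plus a bitmask of targets within that cluster.  In
 * x2APIC mode clusters are 16 vCPUs wide and live inside phys_map; xAPIC
 * flat mode uses an 8-bit mask and xAPIC cluster mode a 4-bit mask.
 * Returns false when the map's mode cannot be decoded, forcing callers
 * onto the slow path.
 */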
126 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
127 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
128 switch (map->mode) {
129 case KVM_APIC_MODE_X2APIC: {
130 u32 offset = (dest_id >> 16) * 16;
131 u32 max_apic_id = map->max_apic_id;
132
133 if (offset <= max_apic_id) {
134 u8 cluster_size = min(max_apic_id - offset + 1, 16U);
135
136 *cluster = &map->phys_map[offset];
137 *mask = dest_id & (0xffff >> (16 - cluster_size));
138 } else {
139 *mask = 0;
140 }
141
142 return true;
143 }
144 case KVM_APIC_MODE_XAPIC_FLAT:
145 *cluster = map->xapic_flat_map;
146 *mask = dest_id & 0xff;
147 return true;
148 case KVM_APIC_MODE_XAPIC_CLUSTER:
149 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
150 *mask = dest_id & 0xf;
151 return true;
152 default:
153 /* Not optimized. */
154 return false;
155 }
156 }
157
158 static void kvm_apic_map_free(struct rcu_head *rcu)
159 {
160 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
161
162 kvfree(map);
163 }
164
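/*
 * Rebuild kvm->arch.apic_map, the lookup table used to deliver interrupts
 * without iterating over every vCPU: phys_map is indexed by (x2)APIC ID
 * and the logical maps by cluster.  The table is sized for the largest
 * x2APIC ID present, published with rcu_assign_pointer(), and the old map
 * is freed only after an RCU grace period.
 */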
165 static void recalculate_apic_map(struct kvm *kvm)
166 {
167 struct kvm_apic_map *new, *old = NULL;
168 struct kvm_vcpu *vcpu;
169 int i;
170 u32 max_id = 255; /* enough space for any xAPIC ID */
171
172 mutex_lock(&kvm->arch.apic_map_lock);
173
174 kvm_for_each_vcpu(i, vcpu, kvm)
175 if (kvm_apic_present(vcpu))
176 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
177
178 new = kvzalloc(sizeof(struct kvm_apic_map) +
179 sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
180
181 if (!new)
182 goto out;
183
184 new->max_apic_id = max_id;
185
186 kvm_for_each_vcpu(i, vcpu, kvm) {
187 struct kvm_lapic *apic = vcpu->arch.apic;
188 struct kvm_lapic **cluster;
189 u16 mask;
190 u32 ldr;
191 u8 xapic_id;
192 u32 x2apic_id;
193
194 if (!kvm_apic_present(vcpu))
195 continue;
196
197 xapic_id = kvm_xapic_id(apic);
198 x2apic_id = kvm_x2apic_id(apic);
199
200 /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
201 if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
202 x2apic_id <= new->max_apic_id)
203 new->phys_map[x2apic_id] = apic;
204 /*
205 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
206 * prevent them from masking VCPUs with APIC ID <= 0xff.
207 */
208 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
209 new->phys_map[xapic_id] = apic;
210
211 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
212
213 if (apic_x2apic_mode(apic)) {
214 new->mode |= KVM_APIC_MODE_X2APIC;
215 } else if (ldr) {
216 ldr = GET_APIC_LOGICAL_ID(ldr);
217 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
218 new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
219 else
220 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
221 }
222
223 if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
224 continue;
225
226 if (mask)
227 cluster[ffs(mask) - 1] = apic;
228 }
229 out:
230 old = rcu_dereference_protected(kvm->arch.apic_map,
231 lockdep_is_held(&kvm->arch.apic_map_lock));
232 rcu_assign_pointer(kvm->arch.apic_map, new);
233 mutex_unlock(&kvm->arch.apic_map_lock);
234
235 if (old)
236 call_rcu(&old->rcu, kvm_apic_map_free);
237
238 kvm_make_scan_ioapic_request(kvm);
239 }
240
241 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
242 {
243 bool enabled = val & APIC_SPIV_APIC_ENABLED;
244
245 kvm_lapic_set_reg(apic, APIC_SPIV, val);
246
247 if (enabled != apic->sw_enabled) {
248 apic->sw_enabled = enabled;
249 if (enabled) {
250 static_key_slow_dec_deferred(&apic_sw_disabled);
251 recalculate_apic_map(apic->vcpu->kvm);
252 } else
253 static_key_slow_inc(&apic_sw_disabled.key);
254 }
255 }
256
257 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
258 {
259 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
260 recalculate_apic_map(apic->vcpu->kvm);
261 }
262
263 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
264 {
265 kvm_lapic_set_reg(apic, APIC_LDR, id);
266 recalculate_apic_map(apic->vcpu->kvm);
267 }
268
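/*
 * In x2APIC mode the LDR is read-only and derived from the APIC ID:
 * bits 31:16 hold the cluster (id >> 4) and bits 15:0 a one-hot bit for
 * the position within the 16-CPU cluster, e.g. ID 0x23 -> LDR 0x00020008.
 */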
269 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
270 {
271 return ((id >> 4) << 16) | (1 << (id & 0xf));
272 }
273
274 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
275 {
276 u32 ldr = kvm_apic_calc_x2apic_ldr(id);
277
278 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
279
280 kvm_lapic_set_reg(apic, APIC_ID, id);
281 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
282 recalculate_apic_map(apic->vcpu->kvm);
283 }
284
285 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
286 {
287 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
288 }
289
290 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
291 {
292 return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
293 }
294
295 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
296 {
297 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
298 }
299
300 static inline int apic_lvtt_period(struct kvm_lapic *apic)
301 {
302 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
303 }
304
305 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
306 {
307 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
308 }
309
310 static inline int apic_lvt_nmi_mode(u32 lvt_val)
311 {
312 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
313 }
314
315 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
316 {
317 struct kvm_lapic *apic = vcpu->arch.apic;
318 struct kvm_cpuid_entry2 *feat;
319 u32 v = APIC_VERSION;
320
321 if (!lapic_in_kernel(vcpu))
322 return;
323
324 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
325 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
326 v |= APIC_LVR_DIRECTED_EOI;
327 kvm_lapic_set_reg(apic, APIC_LVR, v);
328 }
329
330 static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
331 LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */
332 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
333 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
334 LINT_MASK, LINT_MASK, /* LVT0-1 */
335 LVT_MASK /* LVTERR */
336 };
337
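/*
 * Scan a 256-bit vector bitmap (eight 32-bit registers spaced 0x10 bytes
 * apart, as in IRR/ISR/TMR) from the top down and return the highest set
 * vector, or -1 if the bitmap is empty.
 */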
338 static int find_highest_vector(void *bitmap)
339 {
340 int vec;
341 u32 *reg;
342
343 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
344 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
345 reg = bitmap + REG_POS(vec);
346 if (*reg)
347 return __fls(*reg) + vec;
348 }
349
350 return -1;
351 }
352
353 static u8 count_vectors(void *bitmap)
354 {
355 int vec;
356 u32 *reg;
357 u8 count = 0;
358
359 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
360 reg = bitmap + REG_POS(vec);
361 count += hweight32(*reg);
362 }
363
364 return count;
365 }
366
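/*
 * Harvest pending vectors from the posted-interrupt request bitmap (PIR)
 * into the vAPIC page's IRR, clearing each PIR word atomically with
 * xchg(), and return the highest vector now set in the IRR (-1 if none).
 */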
367 int __kvm_apic_update_irr(u32 *pir, void *regs)
368 {
369 u32 i, vec;
370 u32 pir_val, irr_val;
371 int max_irr = -1;
372
373 for (i = vec = 0; i <= 7; i++, vec += 32) {
374 pir_val = READ_ONCE(pir[i]);
375 irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
376 if (pir_val) {
377 irr_val |= xchg(&pir[i], 0);
378 *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
379 }
380 if (irr_val)
381 max_irr = __fls(irr_val) + vec;
382 }
383
384 return max_irr;
385 }
386 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
387
388 int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
389 {
390 struct kvm_lapic *apic = vcpu->arch.apic;
391
392 return __kvm_apic_update_irr(pir, apic->regs);
393 }
394 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
395
396 static inline int apic_search_irr(struct kvm_lapic *apic)
397 {
398 return find_highest_vector(apic->regs + APIC_IRR);
399 }
400
401 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
402 {
403 int result;
404
405 /*
406 * Note that irr_pending is just a hint. It will always be
407 * true when virtual interrupt delivery is enabled.
408 */
409 if (!apic->irr_pending)
410 return -1;
411
412 result = apic_search_irr(apic);
413 ASSERT(result == -1 || result >= 16);
414
415 return result;
416 }
417
418 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
419 {
420 struct kvm_vcpu *vcpu;
421
422 vcpu = apic->vcpu;
423
424 if (unlikely(vcpu->arch.apicv_active)) {
425 /* need to update RVI */
426 apic_clear_vector(vec, apic->regs + APIC_IRR);
427 kvm_x86_ops->hwapic_irr_update(vcpu,
428 apic_find_highest_irr(apic));
429 } else {
430 apic->irr_pending = false;
431 apic_clear_vector(vec, apic->regs + APIC_IRR);
432 if (apic_search_irr(apic) != -1)
433 apic->irr_pending = true;
434 }
435 }
436
437 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
438 {
439 struct kvm_vcpu *vcpu;
440
441 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
442 return;
443
444 vcpu = apic->vcpu;
445
446 /*
447 * With APIC virtualization enabled, all caching is disabled
448 * because the processor can modify ISR under the hood. Instead
449 * just set SVI.
450 */
451 if (unlikely(vcpu->arch.apicv_active))
452 kvm_x86_ops->hwapic_isr_update(vcpu, vec);
453 else {
454 ++apic->isr_count;
455 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
456 /*
457 * ISR (in service register) bit is set when injecting an interrupt.
458 * The highest vector is injected. Thus the latest bit set matches
459 * the highest bit in ISR.
460 */
461 apic->highest_isr_cache = vec;
462 }
463 }
464
465 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
466 {
467 int result;
468
469 /*
470 * Note that isr_count is always 1, and highest_isr_cache
471 * is always -1, with APIC virtualization enabled.
472 */
473 if (!apic->isr_count)
474 return -1;
475 if (likely(apic->highest_isr_cache != -1))
476 return apic->highest_isr_cache;
477
478 result = find_highest_vector(apic->regs + APIC_ISR);
479 ASSERT(result == -1 || result >= 16);
480
481 return result;
482 }
483
484 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
485 {
486 struct kvm_vcpu *vcpu;
487 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
488 return;
489
490 vcpu = apic->vcpu;
491
492 /*
493 * We do get here for APIC virtualization enabled if the guest
494 * uses the Hyper-V APIC enlightenment. In this case we may need
495 * to trigger a new interrupt delivery by writing the SVI field;
496 * on the other hand isr_count and highest_isr_cache are unused
497 * and must be left alone.
498 */
499 if (unlikely(vcpu->arch.apicv_active))
500 kvm_x86_ops->hwapic_isr_update(vcpu,
501 apic_find_highest_isr(apic));
502 else {
503 --apic->isr_count;
504 BUG_ON(apic->isr_count < 0);
505 apic->highest_isr_cache = -1;
506 }
507 }
508
509 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
510 {
511 /* This may race with setting of irr in __apic_accept_irq() and
512 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
513 * will cause vmexit immediately and the value will be recalculated
514 * on the next vmentry.
515 */
516 return apic_find_highest_irr(vcpu->arch.apic);
517 }
518 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
519
520 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
521 int vector, int level, int trig_mode,
522 struct dest_map *dest_map);
523
524 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
525 struct dest_map *dest_map)
526 {
527 struct kvm_lapic *apic = vcpu->arch.apic;
528
529 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
530 irq->level, irq->trig_mode, dest_map);
531 }
532
533 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
534 {
535
536 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
537 sizeof(val));
538 }
539
540 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
541 {
542
543 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
544 sizeof(*val));
545 }
546
547 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
548 {
549 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
550 }
551
552 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
553 {
554 u8 val;
555 if (pv_eoi_get_user(vcpu, &val) < 0)
556 apic_debug("Can't read EOI MSR value: 0x%llx\n",
557 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
558 return val & 0x1;
559 }
560
561 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
562 {
563 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
564 apic_debug("Can't set EOI MSR value: 0x%llx\n",
565 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
566 return;
567 }
568 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
569 }
570
571 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
572 {
573 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
574 apic_debug("Can't clear EOI MSR value: 0x%llx\n",
575 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
576 return;
577 }
578 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
579 }
580
581 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
582 {
583 int highest_irr;
584 if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
585 highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
586 else
587 highest_irr = apic_find_highest_irr(apic);
588 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
589 return -1;
590 return highest_irr;
591 }
592
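/*
 * Recompute the Processor Priority Register: PPR is the TPR when the
 * TPR's priority class (bits 7:4) is at least that of the highest
 * in-service vector, otherwise it is that vector's class with the
 * sub-class cleared.  Returns true when the PPR dropped, i.e. when a
 * previously masked interrupt may now be deliverable.
 */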
593 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
594 {
595 u32 tpr, isrv, ppr, old_ppr;
596 int isr;
597
598 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
599 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
600 isr = apic_find_highest_isr(apic);
601 isrv = (isr != -1) ? isr : 0;
602
603 if ((tpr & 0xf0) >= (isrv & 0xf0))
604 ppr = tpr & 0xff;
605 else
606 ppr = isrv & 0xf0;
607
608 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
609 apic, ppr, isr, isrv);
610
611 *new_ppr = ppr;
612 if (old_ppr != ppr)
613 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
614
615 return ppr < old_ppr;
616 }
617
618 static void apic_update_ppr(struct kvm_lapic *apic)
619 {
620 u32 ppr;
621
622 if (__apic_update_ppr(apic, &ppr) &&
623 apic_has_interrupt_for_ppr(apic, ppr) != -1)
624 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
625 }
626
627 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
628 {
629 apic_update_ppr(vcpu->arch.apic);
630 }
631 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
632
633 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
634 {
635 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
636 apic_update_ppr(apic);
637 }
638
639 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
640 {
641 return mda == (apic_x2apic_mode(apic) ?
642 X2APIC_BROADCAST : APIC_BROADCAST);
643 }
644
645 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
646 {
647 if (kvm_apic_broadcast(apic, mda))
648 return true;
649
650 if (apic_x2apic_mode(apic))
651 return mda == kvm_x2apic_id(apic);
652
653 /*
654 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
655 * it were in x2APIC mode. Hotplugged VCPUs start in xAPIC mode and
656 * this allows unique addressing of VCPUs with APIC ID over 0xff.
657 * The 0xff check is needed because the xAPIC ID is writeable.
658 */
659 if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
660 return true;
661
662 return mda == kvm_xapic_id(apic);
663 }
664
665 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
666 {
667 u32 logical_id;
668
669 if (kvm_apic_broadcast(apic, mda))
670 return true;
671
672 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
673
674 if (apic_x2apic_mode(apic))
675 return ((logical_id >> 16) == (mda >> 16))
676 && (logical_id & mda & 0xffff) != 0;
677
678 logical_id = GET_APIC_LOGICAL_ID(logical_id);
679
680 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
681 case APIC_DFR_FLAT:
682 return (logical_id & mda) != 0;
683 case APIC_DFR_CLUSTER:
684 return ((logical_id >> 4) == (mda >> 4))
685 && (logical_id & mda & 0xf) != 0;
686 default:
687 apic_debug("Bad DFR vcpu %d: %08x\n",
688 apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
689 return false;
690 }
691 }
692
693 /* The KVM local APIC implementation has two quirks:
694 *
695 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
696 * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
697 * KVM doesn't do that aliasing.
698 *
699 * - in-kernel IOAPIC messages have to be delivered directly to
700 * x2APIC, because the kernel does not support interrupt remapping.
701 * In order to support broadcast without interrupt remapping, x2APIC
702 * rewrites the destination of non-IPI messages from APIC_BROADCAST
703 * to X2APIC_BROADCAST.
704 *
705 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
706 * important when userspace wants to use x2APIC-format MSIs, because
707 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
708 */
709 static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
710 struct kvm_lapic *source, struct kvm_lapic *target)
711 {
712 bool ipi = source != NULL;
713
714 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
715 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
716 return X2APIC_BROADCAST;
717
718 return dest_id;
719 }
720
721 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
722 int short_hand, unsigned int dest, int dest_mode)
723 {
724 struct kvm_lapic *target = vcpu->arch.apic;
725 u32 mda = kvm_apic_mda(vcpu, dest, source, target);
726
727 apic_debug("target %p, source %p, dest 0x%x, "
728 "dest_mode 0x%x, short_hand 0x%x\n",
729 target, source, dest, dest_mode, short_hand);
730
731 ASSERT(target);
732 switch (short_hand) {
733 case APIC_DEST_NOSHORT:
734 if (dest_mode == APIC_DEST_PHYSICAL)
735 return kvm_apic_match_physical_addr(target, mda);
736 else
737 return kvm_apic_match_logical_addr(target, mda);
738 case APIC_DEST_SELF:
739 return target == source;
740 case APIC_DEST_ALLINC:
741 return true;
742 case APIC_DEST_ALLBUT:
743 return target != source;
744 default:
745 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
746 short_hand);
747 return false;
748 }
749 }
750 EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
751
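/*
 * Vector hashing: return the index of the (vector % dest_vcpus)-th set
 * bit (counting from zero) in the destination bitmap, so a
 * lowest-priority interrupt is routed to a deterministic vCPU chosen by
 * its vector.
 */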
752 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
753 const unsigned long *bitmap, u32 bitmap_size)
754 {
755 u32 mod;
756 int i, idx = -1;
757
758 mod = vector % dest_vcpus;
759
760 for (i = 0; i <= mod; i++) {
761 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
762 BUG_ON(idx == bitmap_size);
763 }
764
765 return idx;
766 }
767
768 static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
769 {
770 if (!kvm->arch.disabled_lapic_found) {
771 kvm->arch.disabled_lapic_found = true;
772 printk(KERN_INFO
773 "Disabled LAPIC found during irq injection\n");
774 }
775 }
776
777 static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
778 struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
779 {
780 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
781 if ((irq->dest_id == APIC_BROADCAST &&
782 map->mode != KVM_APIC_MODE_X2APIC))
783 return true;
784 if (irq->dest_id == X2APIC_BROADCAST)
785 return true;
786 } else {
787 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
788 if (irq->dest_id == (x2apic_ipi ?
789 X2APIC_BROADCAST : APIC_BROADCAST))
790 return true;
791 }
792
793 return false;
794 }
795
796 /* Return true if the interrupt can be handled by using *bitmap as index mask
797 * for valid destinations in *dst array.
798 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
799 * Note: we may have zero kvm_lapic destinations when we return true, which
800 * means that the interrupt should be dropped. In this case, *bitmap would be
801 * zero and *dst undefined.
802 */
803 static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
804 struct kvm_lapic **src, struct kvm_lapic_irq *irq,
805 struct kvm_apic_map *map, struct kvm_lapic ***dst,
806 unsigned long *bitmap)
807 {
808 int i, lowest;
809
810 if (irq->shorthand == APIC_DEST_SELF && src) {
811 *dst = src;
812 *bitmap = 1;
813 return true;
814 } else if (irq->shorthand)
815 return false;
816
817 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
818 return false;
819
820 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
821 if (irq->dest_id > map->max_apic_id) {
822 *bitmap = 0;
823 } else {
824 *dst = &map->phys_map[irq->dest_id];
825 *bitmap = 1;
826 }
827 return true;
828 }
829
830 *bitmap = 0;
831 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
832 (u16 *)bitmap))
833 return false;
834
835 if (!kvm_lowest_prio_delivery(irq))
836 return true;
837
838 if (!kvm_vector_hashing_enabled()) {
839 lowest = -1;
840 for_each_set_bit(i, bitmap, 16) {
841 if (!(*dst)[i])
842 continue;
843 if (lowest < 0)
844 lowest = i;
845 else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
846 (*dst)[lowest]->vcpu) < 0)
847 lowest = i;
848 }
849 } else {
850 if (!*bitmap)
851 return true;
852
853 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
854 bitmap, 16);
855
856 if (!(*dst)[lowest]) {
857 kvm_apic_disabled_lapic_found(kvm);
858 *bitmap = 0;
859 return true;
860 }
861 }
862
863 *bitmap = (lowest >= 0) ? 1 << lowest : 0;
864
865 return true;
866 }
867
868 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
869 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
870 {
871 struct kvm_apic_map *map;
872 unsigned long bitmap;
873 struct kvm_lapic **dst = NULL;
874 int i;
875 bool ret;
876
877 *r = -1;
878
879 if (irq->shorthand == APIC_DEST_SELF) {
880 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
881 return true;
882 }
883
884 rcu_read_lock();
885 map = rcu_dereference(kvm->arch.apic_map);
886
887 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
888 if (ret)
889 for_each_set_bit(i, &bitmap, 16) {
890 if (!dst[i])
891 continue;
892 if (*r < 0)
893 *r = 0;
894 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
895 }
896
897 rcu_read_unlock();
898 return ret;
899 }
900
901 /*
902 * This routine tries to handle interrupts in posted mode; here is how
903 * it deals with different cases:
904 * - For single-destination interrupts, handle it in posted mode
905 * - Else if vector hashing is enabled and it is a lowest-priority
906 * interrupt, handle it in posted mode and use the following mechanism
907 * to find the destination vCPU.
908 * 1. For lowest-priority interrupts, store all the possible
909 * destination vCPUs in an array.
910 * 2. Use "guest vector % max number of destination vCPUs" to find
911 * the right destination vCPU in the array for the lowest-priority
912 * interrupt.
913 * - Otherwise, use remapped mode to inject the interrupt.
914 */
915 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
916 struct kvm_vcpu **dest_vcpu)
917 {
918 struct kvm_apic_map *map;
919 unsigned long bitmap;
920 struct kvm_lapic **dst = NULL;
921 bool ret = false;
922
923 if (irq->shorthand)
924 return false;
925
926 rcu_read_lock();
927 map = rcu_dereference(kvm->arch.apic_map);
928
929 if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
930 hweight16(bitmap) == 1) {
931 unsigned long i = find_first_bit(&bitmap, 16);
932
933 if (dst[i]) {
934 *dest_vcpu = dst[i]->vcpu;
935 ret = true;
936 }
937 }
938
939 rcu_read_unlock();
940 return ret;
941 }
942
943 /*
944 * Add a pending IRQ into lapic.
945 * Return 1 if successfully added and 0 if discarded.
946 */
947 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
948 int vector, int level, int trig_mode,
949 struct dest_map *dest_map)
950 {
951 int result = 0;
952 struct kvm_vcpu *vcpu = apic->vcpu;
953
954 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
955 trig_mode, vector);
956 switch (delivery_mode) {
957 case APIC_DM_LOWEST:
958 vcpu->arch.apic_arb_prio++;
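/* fall through */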
959 case APIC_DM_FIXED:
960 if (unlikely(trig_mode && !level))
961 break;
962
963 /* FIXME add logic for vcpu on reset */
964 if (unlikely(!apic_enabled(apic)))
965 break;
966
967 result = 1;
968
969 if (dest_map) {
970 __set_bit(vcpu->vcpu_id, dest_map->map);
971 dest_map->vectors[vcpu->vcpu_id] = vector;
972 }
973
974 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
975 if (trig_mode)
976 kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
977 else
978 apic_clear_vector(vector, apic->regs + APIC_TMR);
979 }
980
981 if (vcpu->arch.apicv_active)
982 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
983 else {
984 kvm_lapic_set_irr(vector, apic);
985
986 kvm_make_request(KVM_REQ_EVENT, vcpu);
987 kvm_vcpu_kick(vcpu);
988 }
989 break;
990
991 case APIC_DM_REMRD:
992 result = 1;
993 vcpu->arch.pv.pv_unhalted = 1;
994 kvm_make_request(KVM_REQ_EVENT, vcpu);
995 kvm_vcpu_kick(vcpu);
996 break;
997
998 case APIC_DM_SMI:
999 result = 1;
1000 kvm_make_request(KVM_REQ_SMI, vcpu);
1001 kvm_vcpu_kick(vcpu);
1002 break;
1003
1004 case APIC_DM_NMI:
1005 result = 1;
1006 kvm_inject_nmi(vcpu);
1007 kvm_vcpu_kick(vcpu);
1008 break;
1009
1010 case APIC_DM_INIT:
1011 if (!trig_mode || level) {
1012 result = 1;
1013 /* assumes that there are only KVM_APIC_INIT/SIPI */
1014 apic->pending_events = (1UL << KVM_APIC_INIT);
1015 /* make sure pending_events is visible before sending
1016 * the request */
1017 smp_wmb();
1018 kvm_make_request(KVM_REQ_EVENT, vcpu);
1019 kvm_vcpu_kick(vcpu);
1020 } else {
1021 apic_debug("Ignoring de-assert INIT to vcpu %d\n",
1022 vcpu->vcpu_id);
1023 }
1024 break;
1025
1026 case APIC_DM_STARTUP:
1027 apic_debug("SIPI to vcpu %d vector 0x%02x\n",
1028 vcpu->vcpu_id, vector);
1029 result = 1;
1030 apic->sipi_vector = vector;
1031 /* make sure sipi_vector is visible for the receiver */
1032 smp_wmb();
1033 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1034 kvm_make_request(KVM_REQ_EVENT, vcpu);
1035 kvm_vcpu_kick(vcpu);
1036 break;
1037
1038 case APIC_DM_EXTINT:
1039 /*
1040 * Should only be called by kvm_apic_local_deliver() with LVT0,
1041 * before NMI watchdog was enabled. Already handled by
1042 * kvm_apic_accept_pic_intr().
1043 */
1044 break;
1045
1046 default:
1047 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1048 delivery_mode);
1049 break;
1050 }
1051 return result;
1052 }
1053
1054 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1055 {
1056 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1057 }
1058
1059 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1060 {
1061 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1062 }
1063
1064 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1065 {
1066 int trigger_mode;
1067
1068 /* EOI the ioapic only if the ioapic actually handles the vector. */
1069 if (!kvm_ioapic_handles_vector(apic, vector))
1070 return;
1071
1072 /* Request a KVM exit to inform the userspace IOAPIC. */
1073 if (irqchip_split(apic->vcpu->kvm)) {
1074 apic->vcpu->arch.pending_ioapic_eoi = vector;
1075 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1076 return;
1077 }
1078
1079 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1080 trigger_mode = IOAPIC_LEVEL_TRIG;
1081 else
1082 trigger_mode = IOAPIC_EDGE_TRIG;
1083
1084 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1085 }
1086
1087 static int apic_set_eoi(struct kvm_lapic *apic)
1088 {
1089 int vector = apic_find_highest_isr(apic);
1090
1091 trace_kvm_eoi(apic, vector);
1092
1093 /*
1094 * Not every EOI write has a corresponding ISR bit set;
1095 * one example is when the kernel checks the timer in setup_IO_APIC.
1096 */
1097 if (vector == -1)
1098 return vector;
1099
1100 apic_clear_isr(vector, apic);
1101 apic_update_ppr(apic);
1102
1103 if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
1104 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1105
1106 kvm_ioapic_send_eoi(apic, vector);
1107 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1108 return vector;
1109 }
1110
1111 /*
1112 * this interface assumes a trap-like exit, which has already finished
1113 * desired side effect including vISR and vPPR update.
1114 */
1115 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1116 {
1117 struct kvm_lapic *apic = vcpu->arch.apic;
1118
1119 trace_kvm_eoi(apic, vector);
1120
1121 kvm_ioapic_send_eoi(apic, vector);
1122 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1123 }
1124 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1125
1126 static void apic_send_ipi(struct kvm_lapic *apic)
1127 {
1128 u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
1129 u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
1130 struct kvm_lapic_irq irq;
1131
1132 irq.vector = icr_low & APIC_VECTOR_MASK;
1133 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1134 irq.dest_mode = icr_low & APIC_DEST_MASK;
1135 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1136 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1137 irq.shorthand = icr_low & APIC_SHORT_MASK;
1138 irq.msi_redir_hint = false;
1139 if (apic_x2apic_mode(apic))
1140 irq.dest_id = icr_high;
1141 else
1142 irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1143
1144 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1145
1146 apic_debug("icr_high 0x%x, icr_low 0x%x, "
1147 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
1148 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
1149 "msi_redir_hint 0x%x\n",
1150 icr_high, icr_low, irq.shorthand, irq.dest_id,
1151 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
1152 irq.vector, irq.msi_redir_hint);
1153
1154 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1155 }
1156
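/*
 * Derive the Timer Current Count register from the time remaining until
 * target_expiration: take the remainder modulo the period and convert it
 * to bus cycles using the current divide configuration.
 */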
1157 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1158 {
1159 ktime_t remaining, now;
1160 s64 ns;
1161 u32 tmcct;
1162
1163 ASSERT(apic != NULL);
1164
1165 /* if initial count is 0, current count should also be 0 */
1166 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1167 apic->lapic_timer.period == 0)
1168 return 0;
1169
1170 now = ktime_get();
1171 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1172 if (ktime_to_ns(remaining) < 0)
1173 remaining = 0;
1174
1175 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1176 tmcct = div64_u64(ns,
1177 (APIC_BUS_CYCLE_NS * apic->divide_count));
1178
1179 return tmcct;
1180 }
1181
1182 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1183 {
1184 struct kvm_vcpu *vcpu = apic->vcpu;
1185 struct kvm_run *run = vcpu->run;
1186
1187 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1188 run->tpr_access.rip = kvm_rip_read(vcpu);
1189 run->tpr_access.is_write = write;
1190 }
1191
1192 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1193 {
1194 if (apic->vcpu->arch.tpr_access_reporting)
1195 __report_tpr_access(apic, write);
1196 }
1197
1198 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1199 {
1200 u32 val = 0;
1201
1202 if (offset >= LAPIC_MMIO_LENGTH)
1203 return 0;
1204
1205 switch (offset) {
1206 case APIC_ARBPRI:
1207 apic_debug("Access APIC ARBPRI register which is for P6\n");
1208 break;
1209
1210 case APIC_TMCCT: /* Timer CCR */
1211 if (apic_lvtt_tscdeadline(apic))
1212 return 0;
1213
1214 val = apic_get_tmcct(apic);
1215 break;
1216 case APIC_PROCPRI:
1217 apic_update_ppr(apic);
1218 val = kvm_lapic_get_reg(apic, offset);
1219 break;
1220 case APIC_TASKPRI:
1221 report_tpr_access(apic, false);
1222 /* fall through */
1223 default:
1224 val = kvm_lapic_get_reg(apic, offset);
1225 break;
1226 }
1227
1228 return val;
1229 }
1230
1231 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1232 {
1233 return container_of(dev, struct kvm_lapic, dev);
1234 }
1235
1236 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1237 void *data)
1238 {
1239 unsigned char alignment = offset & 0xf;
1240 u32 result;
1241 /* this bitmask has a bit cleared for each reserved register */
1242 static const u64 rmask = 0x43ff01ffffffe70cULL;
1243
1244 if ((alignment + len) > 4) {
1245 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
1246 offset, len);
1247 return 1;
1248 }
1249
1250 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
1251 apic_debug("KVM_APIC_READ: read reserved register %x\n",
1252 offset);
1253 return 1;
1254 }
1255
1256 result = __apic_read(apic, offset & ~0xf);
1257
1258 trace_kvm_apic_read(offset, result);
1259
1260 switch (len) {
1261 case 1:
1262 case 2:
1263 case 4:
1264 memcpy(data, (char *)&result + alignment, len);
1265 break;
1266 default:
1267 printk(KERN_ERR "Local APIC read with len = %x, "
1268 "should be 1,2, or 4 instead\n", len);
1269 break;
1270 }
1271 return 0;
1272 }
1273 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1274
1275 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1276 {
1277 return kvm_apic_hw_enabled(apic) &&
1278 addr >= apic->base_address &&
1279 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1280 }
1281
1282 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1283 gpa_t address, int len, void *data)
1284 {
1285 struct kvm_lapic *apic = to_lapic(this);
1286 u32 offset = address - apic->base_address;
1287
1288 if (!apic_mmio_in_range(apic, address))
1289 return -EOPNOTSUPP;
1290
1291 kvm_lapic_reg_read(apic, offset, len, data);
1292
1293 return 0;
1294 }
1295
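/*
 * Decode the timer Divide Configuration Register: bits 0, 1 and 3 of TDCR
 * form a 3-bit value; 0b111 (TDCR == 0b1011) means divide by 1, any other
 * value divides by 2^(value + 1), so TDCR == 0 divides by 2.
 */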
1296 static void update_divide_count(struct kvm_lapic *apic)
1297 {
1298 u32 tmp1, tmp2, tdcr;
1299
1300 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1301 tmp1 = tdcr & 0xf;
1302 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1303 apic->divide_count = 0x1 << (tmp2 & 0x7);
1304
1305 apic_debug("timer divide count is 0x%x\n",
1306 apic->divide_count);
1307 }
1308
1309 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1310 {
1311 /*
1312 * Do not allow the guest to program periodic timers with small
1313 * interval, since the hrtimers are not throttled by the host
1314 * scheduler.
1315 */
1316 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1317 s64 min_period = min_timer_period_us * 1000LL;
1318
1319 if (apic->lapic_timer.period < min_period) {
1320 pr_info_ratelimited(
1321 "kvm: vcpu %i: requested %lld ns "
1322 "lapic timer period limited to %lld ns\n",
1323 apic->vcpu->vcpu_id,
1324 apic->lapic_timer.period, min_period);
1325 apic->lapic_timer.period = min_period;
1326 }
1327 }
1328 }
1329
1330 static void apic_update_lvtt(struct kvm_lapic *apic)
1331 {
1332 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1333 apic->lapic_timer.timer_mode_mask;
1334
1335 if (apic->lapic_timer.timer_mode != timer_mode) {
1336 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1337 APIC_LVT_TIMER_TSCDEADLINE)) {
1338 hrtimer_cancel(&apic->lapic_timer.timer);
1339 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1340 apic->lapic_timer.period = 0;
1341 apic->lapic_timer.tscdeadline = 0;
1342 }
1343 apic->lapic_timer.timer_mode = timer_mode;
1344 limit_periodic_timer_frequency(apic);
1345 }
1346 }
1347
1348 static void apic_timer_expired(struct kvm_lapic *apic)
1349 {
1350 struct kvm_vcpu *vcpu = apic->vcpu;
1351 struct swait_queue_head *q = &vcpu->wq;
1352 struct kvm_timer *ktimer = &apic->lapic_timer;
1353
1354 if (atomic_read(&apic->lapic_timer.pending))
1355 return;
1356
1357 atomic_inc(&apic->lapic_timer.pending);
1358 kvm_set_pending_timer(vcpu);
1359
1360 /*
1361 * For x86, the atomic_inc() is serialized, thus
1362 * using swait_active() is safe.
1363 */
1364 if (swait_active(q))
1365 swake_up(q);
1366
1367 if (apic_lvtt_tscdeadline(apic))
1368 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1369 }
1370
1371 /*
1372 * On APICv, this test will cause a busy wait
1373 * during a higher-priority task.
1374 */
1375
1376 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1377 {
1378 struct kvm_lapic *apic = vcpu->arch.apic;
1379 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1380
1381 if (kvm_apic_hw_enabled(apic)) {
1382 int vec = reg & APIC_VECTOR_MASK;
1383 void *bitmap = apic->regs + APIC_ISR;
1384
1385 if (vcpu->arch.apicv_active)
1386 bitmap = apic->regs + APIC_IRR;
1387
1388 if (apic_test_vector(vec, bitmap))
1389 return true;
1390 }
1391 return false;
1392 }
1393
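/*
 * Called before vmentry when the timer interrupt was injected ahead of
 * the programmed TSC deadline (the hrtimer fires up to
 * lapic_timer_advance_ns early): busy-wait until the guest TSC reaches
 * the deadline so the guest never observes the interrupt too soon.
 */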
1394 void wait_lapic_expire(struct kvm_vcpu *vcpu)
1395 {
1396 struct kvm_lapic *apic = vcpu->arch.apic;
1397 u64 guest_tsc, tsc_deadline;
1398
1399 if (!lapic_in_kernel(vcpu))
1400 return;
1401
1402 if (apic->lapic_timer.expired_tscdeadline == 0)
1403 return;
1404
1405 if (!lapic_timer_int_injected(vcpu))
1406 return;
1407
1408 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1409 apic->lapic_timer.expired_tscdeadline = 0;
1410 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1411 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1412
1413 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
1414 if (guest_tsc < tsc_deadline)
1415 __delay(min(tsc_deadline - guest_tsc,
1416 nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
1417 }
1418
1419 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1420 {
1421 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
1422 u64 ns = 0;
1423 ktime_t expire;
1424 struct kvm_vcpu *vcpu = apic->vcpu;
1425 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1426 unsigned long flags;
1427 ktime_t now;
1428
1429 if (unlikely(!tscdeadline || !this_tsc_khz))
1430 return;
1431
1432 local_irq_save(flags);
1433
1434 now = ktime_get();
1435 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1436 if (likely(tscdeadline > guest_tsc)) {
1437 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1438 do_div(ns, this_tsc_khz);
1439 expire = ktime_add_ns(now, ns);
1440 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1441 hrtimer_start(&apic->lapic_timer.timer,
1442 expire, HRTIMER_MODE_ABS_PINNED);
1443 } else
1444 apic_timer_expired(apic);
1445
1446 local_irq_restore(flags);
1447 }
1448
1449 static void start_sw_period(struct kvm_lapic *apic)
1450 {
1451 if (!apic->lapic_timer.period)
1452 return;
1453
1454 if (apic_lvtt_oneshot(apic) &&
1455 ktime_after(ktime_get(),
1456 apic->lapic_timer.target_expiration)) {
1457 apic_timer_expired(apic);
1458 return;
1459 }
1460
1461 hrtimer_start(&apic->lapic_timer.timer,
1462 apic->lapic_timer.target_expiration,
1463 HRTIMER_MODE_ABS_PINNED);
1464 }
1465
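/*
 * Called when the guest changes TDCR while a count-based timer is
 * running: scale the remaining time by the new/old divisor ratio and
 * adjust both the hrtimer target and the guest TSC deadline to match.
 */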
1466 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1467 {
1468 ktime_t now, remaining;
1469 u64 ns_remaining_old, ns_remaining_new;
1470
1471 apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1472 * APIC_BUS_CYCLE_NS * apic->divide_count;
1473 limit_periodic_timer_frequency(apic);
1474
1475 now = ktime_get();
1476 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1477 if (ktime_to_ns(remaining) < 0)
1478 remaining = 0;
1479
1480 ns_remaining_old = ktime_to_ns(remaining);
1481 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1482 apic->divide_count, old_divisor);
1483
1484 apic->lapic_timer.tscdeadline +=
1485 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1486 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1487 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1488 }
1489
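/*
 * Arm a new one-shot/periodic timer: the period is TMICT * bus cycle *
 * divide count.  Record both the hrtimer expiration time and the
 * equivalent guest TSC deadline.  Returns false (clearing the deadline)
 * when TMICT is zero, in which case no timer is started.
 */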
1490 static bool set_target_expiration(struct kvm_lapic *apic)
1491 {
1492 ktime_t now;
1493 u64 tscl = rdtsc();
1494
1495 now = ktime_get();
1496 apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1497 * APIC_BUS_CYCLE_NS * apic->divide_count;
1498
1499 if (!apic->lapic_timer.period) {
1500 apic->lapic_timer.tscdeadline = 0;
1501 return false;
1502 }
1503
1504 limit_periodic_timer_frequency(apic);
1505
1506 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1507 PRIx64 ", "
1508 "timer initial count 0x%x, period %lldns, "
1509 "expire @ 0x%016" PRIx64 ".\n", __func__,
1510 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
1511 kvm_lapic_get_reg(apic, APIC_TMICT),
1512 apic->lapic_timer.period,
1513 ktime_to_ns(ktime_add_ns(now,
1514 apic->lapic_timer.period)));
1515
1516 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1517 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
1518 apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
1519
1520 return true;
1521 }
1522
1523 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1524 {
1525 apic->lapic_timer.tscdeadline +=
1526 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
1527 apic->lapic_timer.target_expiration =
1528 ktime_add_ns(apic->lapic_timer.target_expiration,
1529 apic->lapic_timer.period);
1530 }
1531
1532 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1533 {
1534 if (!lapic_in_kernel(vcpu))
1535 return false;
1536
1537 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1538 }
1539 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1540
1541 static void cancel_hv_timer(struct kvm_lapic *apic)
1542 {
1543 WARN_ON(preemptible());
1544 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1545 kvm_x86_ops->cancel_hv_timer(apic->vcpu);
1546 apic->lapic_timer.hv_timer_in_use = false;
1547 }
1548
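/*
 * Try to program the deadline into the hardware-assisted timer via
 * kvm_x86_ops->set_hv_timer (e.g. the VMX preemption timer).  Returns
 * false when the hardware timer cannot be used, or the deadline has
 * already passed, so the caller falls back to the hrtimer-based software
 * timer.
 */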
1549 static bool start_hv_timer(struct kvm_lapic *apic)
1550 {
1551 struct kvm_timer *ktimer = &apic->lapic_timer;
1552 int r;
1553
1554 WARN_ON(preemptible());
1555 if (!kvm_x86_ops->set_hv_timer)
1556 return false;
1557
1558 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1559 return false;
1560
1561 if (!ktimer->tscdeadline)
1562 return false;
1563
1564 r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
1565 if (r < 0)
1566 return false;
1567
1568 ktimer->hv_timer_in_use = true;
1569 hrtimer_cancel(&ktimer->timer);
1570
1571 /*
1572 * Also recheck ktimer->pending, in case the sw timer triggered in
1573 * the window. For periodic timer, leave the hv timer running for
1574 * simplicity, and the deadline will be recomputed on the next vmexit.
1575 */
1576 if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
1577 if (r)
1578 apic_timer_expired(apic);
1579 return false;
1580 }
1581
1582 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
1583 return true;
1584 }
1585
1586 static void start_sw_timer(struct kvm_lapic *apic)
1587 {
1588 struct kvm_timer *ktimer = &apic->lapic_timer;
1589
1590 WARN_ON(preemptible());
1591 if (apic->lapic_timer.hv_timer_in_use)
1592 cancel_hv_timer(apic);
1593 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1594 return;
1595
1596 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1597 start_sw_period(apic);
1598 else if (apic_lvtt_tscdeadline(apic))
1599 start_sw_tscdeadline(apic);
1600 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1601 }
1602
1603 static void restart_apic_timer(struct kvm_lapic *apic)
1604 {
1605 preempt_disable();
1606 if (!start_hv_timer(apic))
1607 start_sw_timer(apic);
1608 preempt_enable();
1609 }
1610
1611 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1612 {
1613 struct kvm_lapic *apic = vcpu->arch.apic;
1614
1615 preempt_disable();
1616 /* If the preempt notifier has already run, it also called apic_timer_expired */
1617 if (!apic->lapic_timer.hv_timer_in_use)
1618 goto out;
1619 WARN_ON(swait_active(&vcpu->wq));
1620 cancel_hv_timer(apic);
1621 apic_timer_expired(apic);
1622
1623 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1624 advance_periodic_target_expiration(apic);
1625 restart_apic_timer(apic);
1626 }
1627 out:
1628 preempt_enable();
1629 }
1630 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1631
1632 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1633 {
1634 restart_apic_timer(vcpu->arch.apic);
1635 }
1636 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1637
1638 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1639 {
1640 struct kvm_lapic *apic = vcpu->arch.apic;
1641
1642 preempt_disable();
1643 /* Possibly the TSC deadline timer is not enabled yet */
1644 if (apic->lapic_timer.hv_timer_in_use)
1645 start_sw_timer(apic);
1646 preempt_enable();
1647 }
1648 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1649
1650 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1651 {
1652 struct kvm_lapic *apic = vcpu->arch.apic;
1653
1654 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1655 restart_apic_timer(apic);
1656 }
1657
1658 static void start_apic_timer(struct kvm_lapic *apic)
1659 {
1660 atomic_set(&apic->lapic_timer.pending, 0);
1661
1662 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1663 && !set_target_expiration(apic))
1664 return;
1665
1666 restart_apic_timer(apic);
1667 }
1668
1669 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1670 {
1671 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1672
1673 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1674 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1675 if (lvt0_in_nmi_mode) {
1676 apic_debug("Receive NMI setting on APIC_LVT0 "
1677 "for cpu %d\n", apic->vcpu->vcpu_id);
1678 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1679 } else
1680 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1681 }
1682 }
1683
1684 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1685 {
1686 int ret = 0;
1687
1688 trace_kvm_apic_write(reg, val);
1689
1690 switch (reg) {
1691 case APIC_ID: /* Local APIC ID */
1692 if (!apic_x2apic_mode(apic))
1693 kvm_apic_set_xapic_id(apic, val >> 24);
1694 else
1695 ret = 1;
1696 break;
1697
1698 case APIC_TASKPRI:
1699 report_tpr_access(apic, true);
1700 apic_set_tpr(apic, val & 0xff);
1701 break;
1702
1703 case APIC_EOI:
1704 apic_set_eoi(apic);
1705 break;
1706
1707 case APIC_LDR:
1708 if (!apic_x2apic_mode(apic))
1709 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
1710 else
1711 ret = 1;
1712 break;
1713
1714 case APIC_DFR:
1715 if (!apic_x2apic_mode(apic)) {
1716 kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
1717 recalculate_apic_map(apic->vcpu->kvm);
1718 } else
1719 ret = 1;
1720 break;
1721
1722 case APIC_SPIV: {
1723 u32 mask = 0x3ff;
1724 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
1725 mask |= APIC_SPIV_DIRECTED_EOI;
1726 apic_set_spiv(apic, val & mask);
1727 if (!(val & APIC_SPIV_APIC_ENABLED)) {
1728 int i;
1729 u32 lvt_val;
1730
1731 for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
1732 lvt_val = kvm_lapic_get_reg(apic,
1733 APIC_LVTT + 0x10 * i);
1734 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
1735 lvt_val | APIC_LVT_MASKED);
1736 }
1737 apic_update_lvtt(apic);
1738 atomic_set(&apic->lapic_timer.pending, 0);
1739
1740 }
1741 break;
1742 }
1743 case APIC_ICR:
1744 /* No delay here, so we always clear the pending bit */
1745 kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
1746 apic_send_ipi(apic);
1747 break;
1748
1749 case APIC_ICR2:
1750 if (!apic_x2apic_mode(apic))
1751 val &= 0xff000000;
1752 kvm_lapic_set_reg(apic, APIC_ICR2, val);
1753 break;
1754
1755 case APIC_LVT0:
1756 apic_manage_nmi_watchdog(apic, val);
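/* fall through */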
1757 case APIC_LVTTHMR:
1758 case APIC_LVTPC:
1759 case APIC_LVT1:
1760 case APIC_LVTERR:
1761 /* TODO: Check vector */
1762 if (!kvm_apic_sw_enabled(apic))
1763 val |= APIC_LVT_MASKED;
1764
1765 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
1766 kvm_lapic_set_reg(apic, reg, val);
1767
1768 break;
1769
1770 case APIC_LVTT:
1771 if (!kvm_apic_sw_enabled(apic))
1772 val |= APIC_LVT_MASKED;
1773 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
1774 kvm_lapic_set_reg(apic, APIC_LVTT, val);
1775 apic_update_lvtt(apic);
1776 break;
1777
1778 case APIC_TMICT:
1779 if (apic_lvtt_tscdeadline(apic))
1780 break;
1781
1782 hrtimer_cancel(&apic->lapic_timer.timer);
1783 kvm_lapic_set_reg(apic, APIC_TMICT, val);
1784 start_apic_timer(apic);
1785 break;
1786
1787 case APIC_TDCR: {
1788 uint32_t old_divisor = apic->divide_count;
1789
1790 if (val & 4)
1791 apic_debug("KVM_WRITE:TDCR %x\n", val);
1792 kvm_lapic_set_reg(apic, APIC_TDCR, val);
1793 update_divide_count(apic);
1794 if (apic->divide_count != old_divisor &&
1795 apic->lapic_timer.period) {
1796 hrtimer_cancel(&apic->lapic_timer.timer);
1797 update_target_expiration(apic, old_divisor);
1798 restart_apic_timer(apic);
1799 }
1800 break;
1801 }
1802 case APIC_ESR:
1803 if (apic_x2apic_mode(apic) && val != 0) {
1804 apic_debug("KVM_WRITE:ESR not zero %x\n", val);
1805 ret = 1;
1806 }
1807 break;
1808
1809 case APIC_SELF_IPI:
1810 if (apic_x2apic_mode(apic)) {
1811 kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
1812 } else
1813 ret = 1;
1814 break;
1815 default:
1816 ret = 1;
1817 break;
1818 }
1819 if (ret)
1820 apic_debug("Local APIC Write to read-only register %x\n", reg);
1821 return ret;
1822 }
1823 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
1824
1825 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1826 gpa_t address, int len, const void *data)
1827 {
1828 struct kvm_lapic *apic = to_lapic(this);
1829 unsigned int offset = address - apic->base_address;
1830 u32 val;
1831
1832 if (!apic_mmio_in_range(apic, address))
1833 return -EOPNOTSUPP;
1834
1835 /*
1836 * APIC registers must be aligned on a 128-bit boundary.
1837 * 32/64/128-bit registers must be accessed through 32-bit accesses.
1838 * Refer to SDM 8.4.1.
1839 */
1840 if (len != 4 || (offset & 0xf)) {
1841 /* Don't shout loud, $infamous_os would cause only noise. */
1842 apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
1843 return 0;
1844 }
1845
1846 val = *(u32*)data;
1847
1848 /* too common printing */
1849 if (offset != APIC_EOI)
1850 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
1851 "0x%x\n", __func__, offset, len, val);
1852
1853 kvm_lapic_reg_write(apic, offset & 0xff0, val);
1854
1855 return 0;
1856 }
1857
1858 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
1859 {
1860 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
1861 }
1862 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
1863
1864 /* emulate APIC access in a trap manner */
1865 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
1866 {
1867 u32 val = 0;
1868
1869 /* hw has done the conditional check and inst decode */
1870 offset &= 0xff0;
1871
1872 kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
1873
1874 /* TODO: optimize to just emulate side effect w/o one more write */
1875 kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
1876 }
1877 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
1878
1879 void kvm_free_lapic(struct kvm_vcpu *vcpu)
1880 {
1881 struct kvm_lapic *apic = vcpu->arch.apic;
1882
1883 if (!vcpu->arch.apic)
1884 return;
1885
1886 hrtimer_cancel(&apic->lapic_timer.timer);
1887
1888 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
1889 static_key_slow_dec_deferred(&apic_hw_disabled);
1890
1891 if (!apic->sw_enabled)
1892 static_key_slow_dec_deferred(&apic_sw_disabled);
1893
1894 if (apic->regs)
1895 free_page((unsigned long)apic->regs);
1896
1897 kfree(apic);
1898 }
1899
1900 /*
1901 *----------------------------------------------------------------------
1902 * LAPIC interface
1903 *----------------------------------------------------------------------
1904 */
1905 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
1906 {
1907 struct kvm_lapic *apic = vcpu->arch.apic;
1908
1909 if (!lapic_in_kernel(vcpu) ||
1910 !apic_lvtt_tscdeadline(apic))
1911 return 0;
1912
1913 return apic->lapic_timer.tscdeadline;
1914 }
1915
1916 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
1917 {
1918 struct kvm_lapic *apic = vcpu->arch.apic;
1919
1920 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1921 apic_lvtt_period(apic))
1922 return;
1923
1924 hrtimer_cancel(&apic->lapic_timer.timer);
1925 apic->lapic_timer.tscdeadline = data;
1926 start_apic_timer(apic);
1927 }
1928
1929 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
1930 {
1931 struct kvm_lapic *apic = vcpu->arch.apic;
1932
1933 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
1934 | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
1935 }
1936
1937 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
1938 {
1939 u64 tpr;
1940
1941 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
1942
1943 return (tpr & 0xf0) >> 4;
1944 }
1945
1946 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1947 {
1948 u64 old_value = vcpu->arch.apic_base;
1949 struct kvm_lapic *apic = vcpu->arch.apic;
1950
1951 if (!apic)
1952 value |= MSR_IA32_APICBASE_BSP;
1953
1954 vcpu->arch.apic_base = value;
1955
1956 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
1957 kvm_update_cpuid(vcpu);
1958
1959 if (!apic)
1960 return;
1961
1962 /* update jump label if enable bit changes */
1963 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
1964 if (value & MSR_IA32_APICBASE_ENABLE) {
1965 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
1966 static_key_slow_dec_deferred(&apic_hw_disabled);
1967 } else {
1968 static_key_slow_inc(&apic_hw_disabled.key);
1969 recalculate_apic_map(vcpu->kvm);
1970 }
1971 }
1972
1973 if ((old_value ^ value) & X2APIC_ENABLE) {
1974 if (value & X2APIC_ENABLE) {
1975 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
1976 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
1977 } else
1978 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
1979 }
1980
1981 apic->base_address = apic->vcpu->arch.apic_base &
1982 MSR_IA32_APICBASE_BASE;
1983
1984 if ((value & MSR_IA32_APICBASE_ENABLE) &&
1985 apic->base_address != APIC_DEFAULT_PHYS_BASE)
1986 pr_warn_once("APIC base relocation is unsupported by KVM");
1987
1988 /* with FSB delivery interrupt, we can restart APIC functionality */
1989 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
1990 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
1991
1992 }
1993
1994 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1995 {
1996 struct kvm_lapic *apic;
1997 int i;
1998
1999 apic_debug("%s\n", __func__);
2000
2001 ASSERT(vcpu);
2002 apic = vcpu->arch.apic;
2003 ASSERT(apic != NULL);
2004
2005 /* Stop the timer in case it's a reset to an active apic */
2006 hrtimer_cancel(&apic->lapic_timer.timer);
2007
2008 if (!init_event) {
2009 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2010 MSR_IA32_APICBASE_ENABLE);
2011 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2012 }
2013 kvm_apic_set_version(apic->vcpu);
2014
2015 for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2016 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2017 apic_update_lvtt(apic);
2018 if (kvm_vcpu_is_reset_bsp(vcpu) &&
2019 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2020 kvm_lapic_set_reg(apic, APIC_LVT0,
2021 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2022 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2023
2024 kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
2025 apic_set_spiv(apic, 0xff);
2026 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2027 if (!apic_x2apic_mode(apic))
2028 kvm_apic_set_ldr(apic, 0);
2029 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2030 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2031 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2032 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2033 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2034 for (i = 0; i < 8; i++) {
2035 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2036 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2037 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2038 }
2039 apic->irr_pending = vcpu->arch.apicv_active;
2040 apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
2041 apic->highest_isr_cache = -1;
2042 update_divide_count(apic);
2043 atomic_set(&apic->lapic_timer.pending, 0);
2044 if (kvm_vcpu_is_bsp(vcpu))
2045 kvm_lapic_set_base(vcpu,
2046 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2047 vcpu->arch.pv_eoi.msr_val = 0;
2048 apic_update_ppr(apic);
2049 if (vcpu->arch.apicv_active) {
2050 kvm_x86_ops->apicv_post_state_restore(vcpu);
2051 kvm_x86_ops->hwapic_irr_update(vcpu, -1);
2052 kvm_x86_ops->hwapic_isr_update(vcpu, -1);
2053 }
2054
2055 vcpu->arch.apic_arb_prio = 0;
2056 vcpu->arch.apic_attention = 0;
2057
2058 apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
2059 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
2060 vcpu, kvm_lapic_get_reg(apic, APIC_ID),
2061 vcpu->arch.apic_base, apic->base_address);
2062 }
2063
2064 /*
2065 *----------------------------------------------------------------------
2066 * timer interface
2067 *----------------------------------------------------------------------
2068 */
2069
2070 static bool lapic_is_periodic(struct kvm_lapic *apic)
2071 {
2072 return apic_lvtt_period(apic);
2073 }
2074
2075 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2076 {
2077 struct kvm_lapic *apic = vcpu->arch.apic;
2078
2079 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2080 return atomic_read(&apic->lapic_timer.pending);
2081
2082 return 0;
2083 }
2084
2085 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2086 {
2087 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2088 int vector, mode, trig_mode;
2089
2090 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2091 vector = reg & APIC_VECTOR_MASK;
2092 mode = reg & APIC_MODE_MASK;
2093 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2094 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2095 NULL);
2096 }
2097 return 0;
2098 }
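
/*
 * Example LVT decode for the delivery above: a register value of
 * 0x00000400 is vector 0x00 with delivery mode NMI (APIC_MODE_MASK,
 * bits 10:8), edge-triggered, and not masked (APIC_LVT_MASKED, bit 16,
 * clear), so __apic_accept_irq() is called for it.
 */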
2099
2100 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2101 {
2102 struct kvm_lapic *apic = vcpu->arch.apic;
2103
2104 if (apic)
2105 kvm_apic_local_deliver(apic, APIC_LVT0);
2106 }
2107
2108 static const struct kvm_io_device_ops apic_mmio_ops = {
2109 .read = apic_mmio_read,
2110 .write = apic_mmio_write,
2111 };
2112
2113 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2114 {
2115 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2116 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2117
2118 apic_timer_expired(apic);
2119
2120 if (lapic_is_periodic(apic)) {
2121 advance_periodic_target_expiration(apic);
2122 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2123 return HRTIMER_RESTART;
2124 } else
2125 return HRTIMER_NORESTART;
2126 }
2127
2128 int kvm_create_lapic(struct kvm_vcpu *vcpu)
2129 {
2130 struct kvm_lapic *apic;
2131
2132 ASSERT(vcpu != NULL);
2133 apic_debug("apic_init %d\n", vcpu->vcpu_id);
2134
2135 apic = kzalloc(sizeof(*apic), GFP_KERNEL);
2136 if (!apic)
2137 goto nomem;
2138
2139 vcpu->arch.apic = apic;
2140
2141 apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
2142 if (!apic->regs) {
2143 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2144 vcpu->vcpu_id);
2145 goto nomem_free_apic;
2146 }
2147 apic->vcpu = vcpu;
2148
2149 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2150 HRTIMER_MODE_ABS_PINNED);
2151 apic->lapic_timer.timer.function = apic_timer_fn;
2152
2153 /*
2154 * APIC is created enabled. This will prevent kvm_lapic_set_base from
2155 * thinking that APIC state has changed.
2156 */
2157 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2158 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2159 kvm_lapic_reset(vcpu, false);
2160 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2161
2162 return 0;
2163 nomem_free_apic:
2164 kfree(apic);
2165 nomem:
2166 return -ENOMEM;
2167 }
2168
2169 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2170 {
2171 struct kvm_lapic *apic = vcpu->arch.apic;
2172 u32 ppr;
2173
2174 if (!apic_enabled(apic))
2175 return -1;
2176
2177 __apic_update_ppr(apic, &ppr);
2178 return apic_has_interrupt_for_ppr(apic, ppr);
2179 }
2180
2181 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2182 {
2183 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2184 int r = 0;
2185
2186 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2187 r = 1;
2188 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2189 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2190 r = 1;
2191 return r;
2192 }
2193
2194 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2195 {
2196 struct kvm_lapic *apic = vcpu->arch.apic;
2197
2198 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2199 kvm_apic_local_deliver(apic, APIC_LVTT);
2200 if (apic_lvtt_tscdeadline(apic))
2201 apic->lapic_timer.tscdeadline = 0;
2202 if (apic_lvtt_oneshot(apic)) {
2203 apic->lapic_timer.tscdeadline = 0;
2204 apic->lapic_timer.target_expiration = 0;
2205 }
2206 atomic_set(&apic->lapic_timer.pending, 0);
2207 }
2208 }
2209
2210 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2211 {
2212 int vector = kvm_apic_has_interrupt(vcpu);
2213 struct kvm_lapic *apic = vcpu->arch.apic;
2214 u32 ppr;
2215
2216 if (vector == -1)
2217 return -1;
2218
2219 /*
2220 * We get here even with APIC virtualization enabled, if doing
2221 * nested virtualization and L1 runs with the "acknowledge interrupt
2222 * on exit" mode. Then we cannot inject the interrupt via RVI,
2223 * because the process would deliver it through the IDT.
2224 */
2225
2226 apic_clear_irr(vector, apic);
2227 if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
2228 /*
2229 * For auto-EOI interrupts, there might be another pending
2230 * interrupt above PPR, so check whether to raise another
2231 * KVM_REQ_EVENT.
2232 */
2233 apic_update_ppr(apic);
2234 } else {
2235 /*
2236 * For normal interrupts, PPR has been raised and there cannot
2237 * be a higher-priority pending interrupt---except if there was
2238 * a concurrent interrupt injection, but that would have
2239 * triggered KVM_REQ_EVENT already.
2240 */
2241 apic_set_isr(vector, apic);
2242 __apic_update_ppr(apic, &ppr);
2243 }
2244
2245 return vector;
2246 }
2247
2248 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2249 struct kvm_lapic_state *s, bool set)
2250 {
2251 if (apic_x2apic_mode(vcpu->arch.apic)) {
2252 u32 *id = (u32 *)(s->regs + APIC_ID);
2253 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2254
2255 if (vcpu->kvm->arch.x2apic_format) {
2256 if (*id != vcpu->vcpu_id)
2257 return -EINVAL;
2258 } else {
2259 if (set)
2260 *id >>= 24;
2261 else
2262 *id <<= 24;
2263 }
2264
2265 /* In x2APIC mode, the LDR is fixed and based on the id */
2266 if (set)
2267 *ldr = kvm_apic_calc_x2apic_ldr(*id);
2268 }
2269
2270 return 0;
2271 }
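
/*
 * Illustration of the fixup: without the x2apic_format capability the
 * ID is kept in xAPIC layout (bits 31:24), hence the shifts by 24.  The
 * derived LDR uses the architectural x2APIC logical-ID layout, e.g. an
 * x2APIC ID of 0x23 maps to (0x2 << 16) | (1 << 0x3) = 0x00020008.
 */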
2272
2273 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2274 {
2275 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2276 return kvm_apic_state_fixup(vcpu, s, false);
2277 }
2278
2279 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2280 {
2281 struct kvm_lapic *apic = vcpu->arch.apic;
2282 int r;
2283
2284
2285 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2286 /* set SPIV separately to get count of SW disabled APICs right */
2287 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2288
2289 r = kvm_apic_state_fixup(vcpu, s, true);
2290 if (r)
2291 return r;
2292 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2293
2294 recalculate_apic_map(vcpu->kvm);
2295 kvm_apic_set_version(vcpu);
2296
2297 apic_update_ppr(apic);
2298 hrtimer_cancel(&apic->lapic_timer.timer);
2299 apic_update_lvtt(apic);
2300 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2301 update_divide_count(apic);
2302 start_apic_timer(apic);
2303 apic->irr_pending = true;
2304 apic->isr_count = vcpu->arch.apicv_active ?
2305 1 : count_vectors(apic->regs + APIC_ISR);
2306 apic->highest_isr_cache = -1;
2307 if (vcpu->arch.apicv_active) {
2308 kvm_x86_ops->apicv_post_state_restore(vcpu);
2309 kvm_x86_ops->hwapic_irr_update(vcpu,
2310 apic_find_highest_irr(apic));
2311 kvm_x86_ops->hwapic_isr_update(vcpu,
2312 apic_find_highest_isr(apic));
2313 }
2314 kvm_make_request(KVM_REQ_EVENT, vcpu);
2315 if (ioapic_in_kernel(vcpu->kvm))
2316 kvm_rtc_eoi_tracking_restore_one(vcpu);
2317
2318 vcpu->arch.apic_arb_prio = 0;
2319
2320 return 0;
2321 }
2322
2323 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2324 {
2325 struct hrtimer *timer;
2326
2327 if (!lapic_in_kernel(vcpu))
2328 return;
2329
2330 timer = &vcpu->arch.apic->lapic_timer.timer;
2331 if (hrtimer_cancel(timer))
2332 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
2333 }
2334
2335 /*
2336 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2337 *
2338 * Detect whether guest triggered PV EOI since the
2339 * last entry. If yes, set EOI on the guest's behalf.
2340 * Clear PV EOI in guest memory in any case.
2341 */
2342 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2343 struct kvm_lapic *apic)
2344 {
2345 bool pending;
2346 int vector;
2347 /*
2348 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2349 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2350 *
2351 * KVM_APIC_PV_EOI_PENDING is unset:
2352 * -> host disabled PV EOI.
2353 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2354 * -> host enabled PV EOI, guest did not execute EOI yet.
2355 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2356 * -> host enabled PV EOI, guest executed EOI.
2357 */
2358 BUG_ON(!pv_eoi_enabled(vcpu));
2359 pending = pv_eoi_get_pending(vcpu);
2360 /*
2361 * Clear pending bit in any case: it will be set again on vmentry.
2362 * While this might not be ideal from a performance point of view,
2363 * this makes sure pv eoi is only enabled when we know it's safe.
2364 */
2365 pv_eoi_clr_pending(vcpu);
2366 if (pending)
2367 return;
2368 vector = apic_set_eoi(apic);
2369 trace_kvm_pv_eoi(apic, vector);
2370 }
2371
2372 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2373 {
2374 u32 data;
2375
2376 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2377 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2378
2379 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2380 return;
2381
2382 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2383 sizeof(u32)))
2384 return;
2385
2386 apic_set_tpr(vcpu->arch.apic, data & 0xff);
2387 }
2388
2389 /*
2390 * apic_sync_pv_eoi_to_guest - called before vmentry
2391 *
2392 * Detect whether it's safe to enable PV EOI and
2393 * if yes do so.
2394 */
2395 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2396 struct kvm_lapic *apic)
2397 {
2398 if (!pv_eoi_enabled(vcpu) ||
2399 /* IRR set or many bits in ISR: could be nested. */
2400 apic->irr_pending ||
2401 /* Cache not set: could be safe but we don't bother. */
2402 apic->highest_isr_cache == -1 ||
2403 /* Need EOI to update ioapic. */
2404 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2405 /*
2406 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2407 * so we need not do anything here.
2408 */
2409 return;
2410 }
2411
2412 pv_eoi_set_pending(apic->vcpu);
2413 }
2414
2415 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2416 {
2417 u32 data, tpr;
2418 int max_irr, max_isr;
2419 struct kvm_lapic *apic = vcpu->arch.apic;
2420
2421 apic_sync_pv_eoi_to_guest(vcpu, apic);
2422
2423 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2424 return;
2425
2426 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2427 max_irr = apic_find_highest_irr(apic);
2428 if (max_irr < 0)
2429 max_irr = 0;
2430 max_isr = apic_find_highest_isr(apic);
2431 if (max_isr < 0)
2432 max_isr = 0;
2433 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2434
2435 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2436 sizeof(u32));
2437 }
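
/*
 * Layout of the 32-bit snapshot written above: byte 0 is the TPR,
 * byte 1 the high nibble of the highest in-service vector, byte 3 the
 * highest pending (IRR) vector.  E.g. tpr = 0x40, max_isr = 0x51 and
 * max_irr = 0x61 yield data = 0x61005040.
 */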
2438
2439 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2440 {
2441 if (vapic_addr) {
2442 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2443 &vcpu->arch.apic->vapic_cache,
2444 vapic_addr, sizeof(u32)))
2445 return -EINVAL;
2446 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2447 } else {
2448 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2449 }
2450
2451 vcpu->arch.apic->vapic_addr = vapic_addr;
2452 return 0;
2453 }
2454
2455 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2456 {
2457 struct kvm_lapic *apic = vcpu->arch.apic;
2458 u32 reg = (msr - APIC_BASE_MSR) << 4;
2459
2460 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2461 return 1;
2462
2463 if (reg == APIC_ICR2)
2464 return 1;
2465
2466 /* For an ICR write, set the destination (ICR2) before the command (ICR). */
2467 if (reg == APIC_ICR)
2468 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2469 return kvm_lapic_reg_write(apic, reg, (u32)data);
2470 }
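
/*
 * The x2APIC MSR range maps onto MMIO register offsets via
 * reg = (msr - APIC_BASE_MSR) << 4; assuming APIC_BASE_MSR == 0x800,
 * MSR 0x80b targets offset 0xb0 (APIC_EOI).  ICR is the only 64-bit
 * register, so its high half is staged into ICR2 before the low half
 * is written.
 */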
2471
2472 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2473 {
2474 struct kvm_lapic *apic = vcpu->arch.apic;
2475 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2476
2477 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2478 return 1;
2479
2480 if (reg == APIC_DFR || reg == APIC_ICR2) {
2481 apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
2482 reg);
2483 return 1;
2484 }
2485
2486 if (kvm_lapic_reg_read(apic, reg, 4, &low))
2487 return 1;
2488 if (reg == APIC_ICR)
2489 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2490
2491 *data = (((u64)high) << 32) | low;
2492
2493 return 0;
2494 }
2495
2496 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2497 {
2498 struct kvm_lapic *apic = vcpu->arch.apic;
2499
2500 if (!lapic_in_kernel(vcpu))
2501 return 1;
2502
2503 /* For an ICR write, set the destination (ICR2) before the command (ICR). */
2504 if (reg == APIC_ICR)
2505 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2506 return kvm_lapic_reg_write(apic, reg, (u32)data);
2507 }
2508
2509 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2510 {
2511 struct kvm_lapic *apic = vcpu->arch.apic;
2512 u32 low, high = 0;
2513
2514 if (!lapic_in_kernel(vcpu))
2515 return 1;
2516
2517 if (kvm_lapic_reg_read(apic, reg, 4, &low))
2518 return 1;
2519 if (reg == APIC_ICR)
2520 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2521
2522 *data = (((u64)high) << 32) | low;
2523
2524 return 0;
2525 }
2526
2527 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
2528 {
2529 u64 addr = data & ~KVM_MSR_ENABLED;
2530 if (!IS_ALIGNED(addr, 4))
2531 return 1;
2532
2533 vcpu->arch.pv_eoi.msr_val = data;
2534 if (!pv_eoi_enabled(vcpu))
2535 return 0;
2536 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
2537 addr, sizeof(u8));
2538 }
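
/*
 * Layout consumed above: bit 0 of the MSR is KVM_MSR_ENABLED and the
 * remaining bits are the guest-physical address of the one-byte EOI
 * flag, which must be 4-byte aligned.  E.g. 0x12340001 enables PV EOI
 * with the flag at GPA 0x12340000, while 0x12340003 fails the
 * IS_ALIGNED() check and returns 1.
 */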
2539
2540 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2541 {
2542 struct kvm_lapic *apic = vcpu->arch.apic;
2543 u8 sipi_vector;
2544 unsigned long pe;
2545
2546 if (!lapic_in_kernel(vcpu) || !apic->pending_events)
2547 return;
2548
2549 /*
2550 * INITs are latched while in SMM. Because an SMM CPU cannot
2551 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
2552 * and delay processing of INIT until the next RSM.
2553 */
2554 if (is_smm(vcpu)) {
2555 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2556 if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
2557 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2558 return;
2559 }
2560
2561 pe = xchg(&apic->pending_events, 0);
2562 if (test_bit(KVM_APIC_INIT, &pe)) {
2563 kvm_lapic_reset(vcpu, true);
2564 kvm_vcpu_reset(vcpu, true);
2565 if (kvm_vcpu_is_bsp(apic->vcpu))
2566 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2567 else
2568 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2569 }
2570 if (test_bit(KVM_APIC_SIPI, &pe) &&
2571 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2572 /* evaluate pending_events before reading the vector */
2573 smp_rmb();
2574 sipi_vector = apic->sipi_vector;
2575 apic_debug("vcpu %d received sipi with vector # %x\n",
2576 vcpu->vcpu_id, sipi_vector);
2577 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2578 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2579 }
2580 }
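
/*
 * Typical AP bring-up handled above: INIT resets the vCPU and parks it
 * in KVM_MP_STATE_INIT_RECEIVED, and the following SIPI supplies an
 * 8-bit vector whose value << 12 becomes the real-mode start address
 * passed to kvm_vcpu_deliver_sipi_vector().
 */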
2581
2582 void kvm_lapic_init(void)
2583 {
2584 /* do not patch jump label more than once per second */
2585 jump_label_rate_limit(&apic_hw_disabled, HZ);
2586 jump_label_rate_limit(&apic_sw_disabled, HZ);
2587 }
2588
2589 void kvm_lapic_exit(void)
2590 {
2591 static_key_deferred_flush(&apic_hw_disabled);
2592 static_key_deferred_flush(&apic_sw_disabled);
2593 }