arch/x86/kvm/lapic.c
1
2 /*
3 * Local APIC virtualization
4 *
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Dor Laor <dor.laor@qumranet.com>
12 * Gregory Haskins <ghaskins@novell.com>
13 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
14 *
15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
16 *
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
19 */
20
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
23 #include <linux/mm.h>
24 #include <linux/highmem.h>
25 #include <linux/smp.h>
26 #include <linux/hrtimer.h>
27 #include <linux/io.h>
28 #include <linux/export.h>
29 #include <linux/math64.h>
30 #include <linux/slab.h>
31 #include <asm/processor.h>
32 #include <asm/msr.h>
33 #include <asm/page.h>
34 #include <asm/current.h>
35 #include <asm/apicdef.h>
36 #include <asm/delay.h>
37 #include <linux/atomic.h>
38 #include <linux/jump_label.h>
39 #include "kvm_cache_regs.h"
40 #include "irq.h"
41 #include "trace.h"
42 #include "x86.h"
43 #include "cpuid.h"
44 #include "hyperv.h"
45
46 #ifndef CONFIG_X86_64
47 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
48 #else
49 #define mod_64(x, y) ((x) % (y))
50 #endif
51
52 #define PRId64 "d"
53 #define PRIx64 "llx"
54 #define PRIu64 "u"
55 #define PRIo64 "o"
56
57 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
58 #define apic_debug(fmt, arg...)
59
60 /* 14 is the version for Xeon and Pentium 8.4.8 */
61 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
62 #define LAPIC_MMIO_LENGTH (1 << 12)
63 /* the following defines are not in apicdef.h */
64 #define APIC_SHORT_MASK 0xc0000
65 #define APIC_DEST_NOSHORT 0x0
66 #define APIC_DEST_MASK 0x800
67 #define MAX_APIC_VECTOR 256
68 #define APIC_VECTORS_PER_REG 32
69
70 #define APIC_BROADCAST 0xFF
71 #define X2APIC_BROADCAST 0xFFFFFFFFul
72
73 static inline int apic_test_vector(int vec, void *bitmap)
74 {
75 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
76 }
77
78 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
79 {
80 struct kvm_lapic *apic = vcpu->arch.apic;
81
82 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
83 apic_test_vector(vector, apic->regs + APIC_IRR);
84 }
85
86 static inline void apic_clear_vector(int vec, void *bitmap)
87 {
88 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
89 }
90
91 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
92 {
93 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
94 }
95
96 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
97 {
98 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
99 }
100
101 struct static_key_deferred apic_hw_disabled __read_mostly;
102 struct static_key_deferred apic_sw_disabled __read_mostly;
103
104 static inline int apic_enabled(struct kvm_lapic *apic)
105 {
106 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
107 }
108
109 #define LVT_MASK \
110 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
111
112 #define LINT_MASK \
113 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
114 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
115
116 static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
117 {
118 return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
119 }
120
121 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
122 {
123 return apic->vcpu->vcpu_id;
124 }
125
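/*
 * Resolve a logical destination ID to a cluster of up to 16 LAPICs plus a
 * bitmask of targets within that cluster.  In x2APIC mode the upper 16 bits
 * of the destination select the cluster (indexed into phys_map) and the low
 * 16 bits are the per-CPU mask; flat xAPIC uses a single 8-bit mask and
 * cluster xAPIC uses bits 7:4 as the cluster and bits 3:0 as the mask.
 */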
126 static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
127 u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
128 switch (map->mode) {
129 case KVM_APIC_MODE_X2APIC: {
130 u32 offset = (dest_id >> 16) * 16;
131 u32 max_apic_id = map->max_apic_id;
132
133 if (offset <= max_apic_id) {
134 u8 cluster_size = min(max_apic_id - offset + 1, 16U);
135
136 *cluster = &map->phys_map[offset];
137 *mask = dest_id & (0xffff >> (16 - cluster_size));
138 } else {
139 *mask = 0;
140 }
141
142 return true;
143 }
144 case KVM_APIC_MODE_XAPIC_FLAT:
145 *cluster = map->xapic_flat_map;
146 *mask = dest_id & 0xff;
147 return true;
148 case KVM_APIC_MODE_XAPIC_CLUSTER:
149 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
150 *mask = dest_id & 0xf;
151 return true;
152 default:
153 /* Not optimized. */
154 return false;
155 }
156 }
157
158 static void kvm_apic_map_free(struct rcu_head *rcu)
159 {
160 struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
161
162 kvfree(map);
163 }
164
165 static void recalculate_apic_map(struct kvm *kvm)
166 {
167 struct kvm_apic_map *new, *old = NULL;
168 struct kvm_vcpu *vcpu;
169 int i;
170 u32 max_id = 255; /* enough space for any xAPIC ID */
171
172 mutex_lock(&kvm->arch.apic_map_lock);
173
174 kvm_for_each_vcpu(i, vcpu, kvm)
175 if (kvm_apic_present(vcpu))
176 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
177
178 new = kvzalloc(sizeof(struct kvm_apic_map) +
179 sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
180
181 if (!new)
182 goto out;
183
184 new->max_apic_id = max_id;
185
186 kvm_for_each_vcpu(i, vcpu, kvm) {
187 struct kvm_lapic *apic = vcpu->arch.apic;
188 struct kvm_lapic **cluster;
189 u16 mask;
190 u32 ldr;
191 u8 xapic_id;
192 u32 x2apic_id;
193
194 if (!kvm_apic_present(vcpu))
195 continue;
196
197 xapic_id = kvm_xapic_id(apic);
198 x2apic_id = kvm_x2apic_id(apic);
199
200 /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
201 if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
202 x2apic_id <= new->max_apic_id)
203 new->phys_map[x2apic_id] = apic;
204 /*
205 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
206 * prevent them from masking VCPUs with APIC ID <= 0xff.
207 */
208 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
209 new->phys_map[xapic_id] = apic;
210
211 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
212
213 if (apic_x2apic_mode(apic)) {
214 new->mode |= KVM_APIC_MODE_X2APIC;
215 } else if (ldr) {
216 ldr = GET_APIC_LOGICAL_ID(ldr);
217 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
218 new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
219 else
220 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
221 }
222
223 if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
224 continue;
225
226 if (mask)
227 cluster[ffs(mask) - 1] = apic;
228 }
229 out:
230 old = rcu_dereference_protected(kvm->arch.apic_map,
231 lockdep_is_held(&kvm->arch.apic_map_lock));
232 rcu_assign_pointer(kvm->arch.apic_map, new);
233 mutex_unlock(&kvm->arch.apic_map_lock);
234
235 if (old)
236 call_rcu(&old->rcu, kvm_apic_map_free);
237
238 kvm_make_scan_ioapic_request(kvm);
239 }
240
241 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
242 {
243 bool enabled = val & APIC_SPIV_APIC_ENABLED;
244
245 kvm_lapic_set_reg(apic, APIC_SPIV, val);
246
247 if (enabled != apic->sw_enabled) {
248 apic->sw_enabled = enabled;
249 if (enabled) {
250 static_key_slow_dec_deferred(&apic_sw_disabled);
251 recalculate_apic_map(apic->vcpu->kvm);
252 } else
253 static_key_slow_inc(&apic_sw_disabled.key);
254 }
255 }
256
257 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
258 {
259 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
260 recalculate_apic_map(apic->vcpu->kvm);
261 }
262
263 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
264 {
265 kvm_lapic_set_reg(apic, APIC_LDR, id);
266 recalculate_apic_map(apic->vcpu->kvm);
267 }
268
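/*
 * x2APIC logical destinations carry the cluster ID in bits 31:16 and a
 * one-hot logical ID (bit position = x2APIC ID modulo 16) in bits 15:0.
 */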
269 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
270 {
271 return ((id >> 4) << 16) | (1 << (id & 0xf));
272 }
273
274 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
275 {
276 u32 ldr = kvm_apic_calc_x2apic_ldr(id);
277
278 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
279
280 kvm_lapic_set_reg(apic, APIC_ID, id);
281 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
282 recalculate_apic_map(apic->vcpu->kvm);
283 }
284
285 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
286 {
287 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
288 }
289
290 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
291 {
292 return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
293 }
294
295 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
296 {
297 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
298 }
299
300 static inline int apic_lvtt_period(struct kvm_lapic *apic)
301 {
302 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
303 }
304
305 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
306 {
307 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
308 }
309
310 static inline int apic_lvt_nmi_mode(u32 lvt_val)
311 {
312 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
313 }
314
315 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
316 {
317 struct kvm_lapic *apic = vcpu->arch.apic;
318 struct kvm_cpuid_entry2 *feat;
319 u32 v = APIC_VERSION;
320
321 if (!lapic_in_kernel(vcpu))
322 return;
323
324 /*
325 * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
326 * implementation), which doesn't have an EOI register.  Some buggy OSes
327 * (e.g. Windows with the Hyper-V role) disable EOI broadcast in the lapic
328 * without checking the IOAPIC version first, so level-triggered
329 * interrupts never get EOIed in the IOAPIC.
330 */
331 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
332 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
333 !ioapic_in_kernel(vcpu->kvm))
334 v |= APIC_LVR_DIRECTED_EOI;
335 kvm_lapic_set_reg(apic, APIC_LVR, v);
336 }
337
338 static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
339 LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */
340 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
341 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
342 LINT_MASK, LINT_MASK, /* LVT0-1 */
343 LVT_MASK /* LVTERR */
344 };
345
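/*
 * Scan the eight 32-bit registers of an IRR/ISR-style bitmap from the top
 * down and return the highest vector that is set, or -1 if none are.
 */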
346 static int find_highest_vector(void *bitmap)
347 {
348 int vec;
349 u32 *reg;
350
351 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
352 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
353 reg = bitmap + REG_POS(vec);
354 if (*reg)
355 return __fls(*reg) + vec;
356 }
357
358 return -1;
359 }
360
361 static u8 count_vectors(void *bitmap)
362 {
363 int vec;
364 u32 *reg;
365 u8 count = 0;
366
367 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
368 reg = bitmap + REG_POS(vec);
369 count += hweight32(*reg);
370 }
371
372 return count;
373 }
374
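/*
 * Merge the posted-interrupt request (PIR) bitmap into the vAPIC page's IRR.
 * Returns true if the highest vector newly set from the PIR is also the
 * highest vector now pending in the IRR.
 */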
375 bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
376 {
377 u32 i, vec;
378 u32 pir_val, irr_val, prev_irr_val;
379 int max_updated_irr;
380
381 max_updated_irr = -1;
382 *max_irr = -1;
383
384 for (i = vec = 0; i <= 7; i++, vec += 32) {
385 pir_val = READ_ONCE(pir[i]);
386 irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
387 if (pir_val) {
388 prev_irr_val = irr_val;
389 irr_val |= xchg(&pir[i], 0);
390 *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
391 if (prev_irr_val != irr_val) {
392 max_updated_irr =
393 __fls(irr_val ^ prev_irr_val) + vec;
394 }
395 }
396 if (irr_val)
397 *max_irr = __fls(irr_val) + vec;
398 }
399
400 return ((max_updated_irr != -1) &&
401 (max_updated_irr == *max_irr));
402 }
403 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
404
405 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
406 {
407 struct kvm_lapic *apic = vcpu->arch.apic;
408
409 return __kvm_apic_update_irr(pir, apic->regs, max_irr);
410 }
411 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
412
413 static inline int apic_search_irr(struct kvm_lapic *apic)
414 {
415 return find_highest_vector(apic->regs + APIC_IRR);
416 }
417
418 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
419 {
420 int result;
421
422 /*
423 * Note that irr_pending is just a hint.  It will always be
424 * true with virtual interrupt delivery enabled.
425 */
426 if (!apic->irr_pending)
427 return -1;
428
429 result = apic_search_irr(apic);
430 ASSERT(result == -1 || result >= 16);
431
432 return result;
433 }
434
435 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
436 {
437 struct kvm_vcpu *vcpu;
438
439 vcpu = apic->vcpu;
440
441 if (unlikely(vcpu->arch.apicv_active)) {
442 /* need to update RVI */
443 apic_clear_vector(vec, apic->regs + APIC_IRR);
444 kvm_x86_ops->hwapic_irr_update(vcpu,
445 apic_find_highest_irr(apic));
446 } else {
447 apic->irr_pending = false;
448 apic_clear_vector(vec, apic->regs + APIC_IRR);
449 if (apic_search_irr(apic) != -1)
450 apic->irr_pending = true;
451 }
452 }
453
454 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
455 {
456 struct kvm_vcpu *vcpu;
457
458 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
459 return;
460
461 vcpu = apic->vcpu;
462
463 /*
464 * With APIC virtualization enabled, all caching is disabled
465 * because the processor can modify ISR under the hood. Instead
466 * just set SVI.
467 */
468 if (unlikely(vcpu->arch.apicv_active))
469 kvm_x86_ops->hwapic_isr_update(vcpu, vec);
470 else {
471 ++apic->isr_count;
472 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
473 /*
474 * ISR (in service register) bit is set when injecting an interrupt.
475 * The highest vector is injected. Thus the latest bit set matches
476 * the highest bit in ISR.
477 */
478 apic->highest_isr_cache = vec;
479 }
480 }
481
482 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
483 {
484 int result;
485
486 /*
487 * Note that isr_count is always 1, and highest_isr_cache
488 * is always -1, with APIC virtualization enabled.
489 */
490 if (!apic->isr_count)
491 return -1;
492 if (likely(apic->highest_isr_cache != -1))
493 return apic->highest_isr_cache;
494
495 result = find_highest_vector(apic->regs + APIC_ISR);
496 ASSERT(result == -1 || result >= 16);
497
498 return result;
499 }
500
501 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
502 {
503 struct kvm_vcpu *vcpu;
504 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
505 return;
506
507 vcpu = apic->vcpu;
508
509 /*
510 * We do get here for APIC virtualization enabled if the guest
511 * uses the Hyper-V APIC enlightenment. In this case we may need
512 * to trigger a new interrupt delivery by writing the SVI field;
513 * on the other hand isr_count and highest_isr_cache are unused
514 * and must be left alone.
515 */
516 if (unlikely(vcpu->arch.apicv_active))
517 kvm_x86_ops->hwapic_isr_update(vcpu,
518 apic_find_highest_isr(apic));
519 else {
520 --apic->isr_count;
521 BUG_ON(apic->isr_count < 0);
522 apic->highest_isr_cache = -1;
523 }
524 }
525
526 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
527 {
528 /* This may race with setting of irr in __apic_accept_irq() and the
529 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq()
530 * will cause an immediate vmexit and the value will be recalculated
531 * on the next vmentry.
532 */
533 return apic_find_highest_irr(vcpu->arch.apic);
534 }
535 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
536
537 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
538 int vector, int level, int trig_mode,
539 struct dest_map *dest_map);
540
541 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
542 struct dest_map *dest_map)
543 {
544 struct kvm_lapic *apic = vcpu->arch.apic;
545
546 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
547 irq->level, irq->trig_mode, dest_map);
548 }
549
550 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
551 unsigned long ipi_bitmap_high, int min,
552 unsigned long icr, int op_64_bit)
553 {
554 int i;
555 struct kvm_apic_map *map;
556 struct kvm_vcpu *vcpu;
557 struct kvm_lapic_irq irq = {0};
558 int cluster_size = op_64_bit ? 64 : 32;
559 int count = 0;
560
561 irq.vector = icr & APIC_VECTOR_MASK;
562 irq.delivery_mode = icr & APIC_MODE_MASK;
563 irq.level = (icr & APIC_INT_ASSERT) != 0;
564 irq.trig_mode = icr & APIC_INT_LEVELTRIG;
565
566 if (icr & APIC_DEST_MASK)
567 return -KVM_EINVAL;
568 if (icr & APIC_SHORT_MASK)
569 return -KVM_EINVAL;
570
571 rcu_read_lock();
572 map = rcu_dereference(kvm->arch.apic_map);
573
574 /* Bits above cluster_size are masked in the caller. */
575 for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
576 vcpu = map->phys_map[min + i]->vcpu;
577 count += kvm_apic_set_irq(vcpu, &irq, NULL);
578 }
579
580 min += cluster_size;
581 for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
582 vcpu = map->phys_map[min + i]->vcpu;
583 count += kvm_apic_set_irq(vcpu, &irq, NULL);
584 }
585
586 rcu_read_unlock();
587 return count;
588 }
589
590 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
591 {
592
593 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
594 sizeof(val));
595 }
596
597 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
598 {
599
600 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
601 sizeof(*val));
602 }
603
604 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
605 {
606 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
607 }
608
609 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
610 {
611 u8 val;
612 if (pv_eoi_get_user(vcpu, &val) < 0)
613 apic_debug("Can't read EOI MSR value: 0x%llx\n",
614 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
615 return val & 0x1;
616 }
617
618 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
619 {
620 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
621 apic_debug("Can't set EOI MSR value: 0x%llx\n",
622 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
623 return;
624 }
625 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
626 }
627
628 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
629 {
630 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
631 apic_debug("Can't clear EOI MSR value: 0x%llx\n",
632 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
633 return;
634 }
635 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
636 }
637
638 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
639 {
640 int highest_irr;
641 if (apic->vcpu->arch.apicv_active)
642 highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
643 else
644 highest_irr = apic_find_highest_irr(apic);
645 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
646 return -1;
647 return highest_irr;
648 }
649
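/*
 * PPR is the higher of the TPR and the priority class (bits 7:4) of the
 * highest in-service vector.  Returns true if the PPR was lowered, which
 * may unmask a pending interrupt.
 */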
650 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
651 {
652 u32 tpr, isrv, ppr, old_ppr;
653 int isr;
654
655 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
656 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
657 isr = apic_find_highest_isr(apic);
658 isrv = (isr != -1) ? isr : 0;
659
660 if ((tpr & 0xf0) >= (isrv & 0xf0))
661 ppr = tpr & 0xff;
662 else
663 ppr = isrv & 0xf0;
664
665 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
666 apic, ppr, isr, isrv);
667
668 *new_ppr = ppr;
669 if (old_ppr != ppr)
670 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
671
672 return ppr < old_ppr;
673 }
674
675 static void apic_update_ppr(struct kvm_lapic *apic)
676 {
677 u32 ppr;
678
679 if (__apic_update_ppr(apic, &ppr) &&
680 apic_has_interrupt_for_ppr(apic, ppr) != -1)
681 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
682 }
683
684 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
685 {
686 apic_update_ppr(vcpu->arch.apic);
687 }
688 EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
689
690 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
691 {
692 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
693 apic_update_ppr(apic);
694 }
695
696 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
697 {
698 return mda == (apic_x2apic_mode(apic) ?
699 X2APIC_BROADCAST : APIC_BROADCAST);
700 }
701
702 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
703 {
704 if (kvm_apic_broadcast(apic, mda))
705 return true;
706
707 if (apic_x2apic_mode(apic))
708 return mda == kvm_x2apic_id(apic);
709
710 /*
711 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
712 * it were in x2APIC mode. Hotplugged VCPUs start in xAPIC mode and
713 * this allows unique addressing of VCPUs with APIC ID over 0xff.
714 * The 0xff condition is needed because the xAPIC ID is writeable.
715 */
716 if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
717 return true;
718
719 return mda == kvm_xapic_id(apic);
720 }
721
722 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
723 {
724 u32 logical_id;
725
726 if (kvm_apic_broadcast(apic, mda))
727 return true;
728
729 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
730
731 if (apic_x2apic_mode(apic))
732 return ((logical_id >> 16) == (mda >> 16))
733 && (logical_id & mda & 0xffff) != 0;
734
735 logical_id = GET_APIC_LOGICAL_ID(logical_id);
736
737 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
738 case APIC_DFR_FLAT:
739 return (logical_id & mda) != 0;
740 case APIC_DFR_CLUSTER:
741 return ((logical_id >> 4) == (mda >> 4))
742 && (logical_id & mda & 0xf) != 0;
743 default:
744 apic_debug("Bad DFR vcpu %d: %08x\n",
745 apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
746 return false;
747 }
748 }
749
750 /* The KVM local APIC implementation has two quirks:
751 *
752 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
753 * in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
754 * KVM doesn't do that aliasing.
755 *
756 * - in-kernel IOAPIC messages have to be delivered directly to
757 * x2APIC, because the kernel does not support interrupt remapping.
758 * In order to support broadcast without interrupt remapping, x2APIC
759 * rewrites the destination of non-IPI messages from APIC_BROADCAST
760 * to X2APIC_BROADCAST.
761 *
762 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
763 * important when userspace wants to use x2APIC-format MSIs, because
764 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
765 */
766 static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
767 struct kvm_lapic *source, struct kvm_lapic *target)
768 {
769 bool ipi = source != NULL;
770
771 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
772 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
773 return X2APIC_BROADCAST;
774
775 return dest_id;
776 }
777
778 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
779 int short_hand, unsigned int dest, int dest_mode)
780 {
781 struct kvm_lapic *target = vcpu->arch.apic;
782 u32 mda = kvm_apic_mda(vcpu, dest, source, target);
783
784 apic_debug("target %p, source %p, dest 0x%x, "
785 "dest_mode 0x%x, short_hand 0x%x\n",
786 target, source, dest, dest_mode, short_hand);
787
788 ASSERT(target);
789 switch (short_hand) {
790 case APIC_DEST_NOSHORT:
791 if (dest_mode == APIC_DEST_PHYSICAL)
792 return kvm_apic_match_physical_addr(target, mda);
793 else
794 return kvm_apic_match_logical_addr(target, mda);
795 case APIC_DEST_SELF:
796 return target == source;
797 case APIC_DEST_ALLINC:
798 return true;
799 case APIC_DEST_ALLBUT:
800 return target != source;
801 default:
802 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
803 short_hand);
804 return false;
805 }
806 }
807 EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
808
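/*
 * Vector hashing: pick the (vector % dest_vcpus)-th set bit in the
 * destination bitmap as the target of a lowest-priority interrupt.
 */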
809 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
810 const unsigned long *bitmap, u32 bitmap_size)
811 {
812 u32 mod;
813 int i, idx = -1;
814
815 mod = vector % dest_vcpus;
816
817 for (i = 0; i <= mod; i++) {
818 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
819 BUG_ON(idx == bitmap_size);
820 }
821
822 return idx;
823 }
824
825 static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
826 {
827 if (!kvm->arch.disabled_lapic_found) {
828 kvm->arch.disabled_lapic_found = true;
829 printk(KERN_INFO
830 "Disabled LAPIC found during irq injection\n");
831 }
832 }
833
834 static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
835 struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
836 {
837 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
838 if ((irq->dest_id == APIC_BROADCAST &&
839 map->mode != KVM_APIC_MODE_X2APIC))
840 return true;
841 if (irq->dest_id == X2APIC_BROADCAST)
842 return true;
843 } else {
844 bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
845 if (irq->dest_id == (x2apic_ipi ?
846 X2APIC_BROADCAST : APIC_BROADCAST))
847 return true;
848 }
849
850 return false;
851 }
852
853 /* Return true if the interrupt can be handled by using *bitmap as an index
854 * mask for valid destinations in the *dst array.
855 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
856 * Note: we may have zero kvm_lapic destinations when we return true, which
857 * means that the interrupt should be dropped. In this case, *bitmap would be
858 * zero and *dst undefined.
859 */
860 static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
861 struct kvm_lapic **src, struct kvm_lapic_irq *irq,
862 struct kvm_apic_map *map, struct kvm_lapic ***dst,
863 unsigned long *bitmap)
864 {
865 int i, lowest;
866
867 if (irq->shorthand == APIC_DEST_SELF && src) {
868 *dst = src;
869 *bitmap = 1;
870 return true;
871 } else if (irq->shorthand)
872 return false;
873
874 if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
875 return false;
876
877 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
878 if (irq->dest_id > map->max_apic_id) {
879 *bitmap = 0;
880 } else {
881 *dst = &map->phys_map[irq->dest_id];
882 *bitmap = 1;
883 }
884 return true;
885 }
886
887 *bitmap = 0;
888 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
889 (u16 *)bitmap))
890 return false;
891
892 if (!kvm_lowest_prio_delivery(irq))
893 return true;
894
895 if (!kvm_vector_hashing_enabled()) {
896 lowest = -1;
897 for_each_set_bit(i, bitmap, 16) {
898 if (!(*dst)[i])
899 continue;
900 if (lowest < 0)
901 lowest = i;
902 else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
903 (*dst)[lowest]->vcpu) < 0)
904 lowest = i;
905 }
906 } else {
907 if (!*bitmap)
908 return true;
909
910 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
911 bitmap, 16);
912
913 if (!(*dst)[lowest]) {
914 kvm_apic_disabled_lapic_found(kvm);
915 *bitmap = 0;
916 return true;
917 }
918 }
919
920 *bitmap = (lowest >= 0) ? 1 << lowest : 0;
921
922 return true;
923 }
924
925 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
926 struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
927 {
928 struct kvm_apic_map *map;
929 unsigned long bitmap;
930 struct kvm_lapic **dst = NULL;
931 int i;
932 bool ret;
933
934 *r = -1;
935
936 if (irq->shorthand == APIC_DEST_SELF) {
937 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
938 return true;
939 }
940
941 rcu_read_lock();
942 map = rcu_dereference(kvm->arch.apic_map);
943
944 ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
945 if (ret)
946 for_each_set_bit(i, &bitmap, 16) {
947 if (!dst[i])
948 continue;
949 if (*r < 0)
950 *r = 0;
951 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
952 }
953
954 rcu_read_unlock();
955 return ret;
956 }
957
958 /*
959 * This routine tries to handle interrupts in posted mode; here is how
960 * it deals with different cases:
961 * - For single-destination interrupts, handle it in posted mode
962 * - Else if vector hashing is enabled and it is a lowest-priority
963 * interrupt, handle it in posted mode and use the following mechanism
964 * to find the destination vCPU.
965 * 1. For lowest-priority interrupts, store all the possible
966 * destination vCPUs in an array.
967 * 2. Use "guest vector % max number of destination vCPUs" to find
968 * the right destination vCPU in the array for the lowest-priority
969 * interrupt.
970 * - Otherwise, use remapped mode to inject the interrupt.
971 */
972 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
973 struct kvm_vcpu **dest_vcpu)
974 {
975 struct kvm_apic_map *map;
976 unsigned long bitmap;
977 struct kvm_lapic **dst = NULL;
978 bool ret = false;
979
980 if (irq->shorthand)
981 return false;
982
983 rcu_read_lock();
984 map = rcu_dereference(kvm->arch.apic_map);
985
986 if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
987 hweight16(bitmap) == 1) {
988 unsigned long i = find_first_bit(&bitmap, 16);
989
990 if (dst[i]) {
991 *dest_vcpu = dst[i]->vcpu;
992 ret = true;
993 }
994 }
995
996 rcu_read_unlock();
997 return ret;
998 }
999
1000 /*
1001 * Add a pending IRQ into lapic.
1002 * Return 1 if successfully added and 0 if discarded.
1003 */
1004 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1005 int vector, int level, int trig_mode,
1006 struct dest_map *dest_map)
1007 {
1008 int result = 0;
1009 struct kvm_vcpu *vcpu = apic->vcpu;
1010
1011 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
1012 trig_mode, vector);
1013 switch (delivery_mode) {
1014 case APIC_DM_LOWEST:
1015 vcpu->arch.apic_arb_prio++;
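/* fall through */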
1016 case APIC_DM_FIXED:
1017 if (unlikely(trig_mode && !level))
1018 break;
1019
1020 /* FIXME add logic for vcpu on reset */
1021 if (unlikely(!apic_enabled(apic)))
1022 break;
1023
1024 result = 1;
1025
1026 if (dest_map) {
1027 __set_bit(vcpu->vcpu_id, dest_map->map);
1028 dest_map->vectors[vcpu->vcpu_id] = vector;
1029 }
1030
1031 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1032 if (trig_mode)
1033 kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
1034 else
1035 apic_clear_vector(vector, apic->regs + APIC_TMR);
1036 }
1037
1038 if (vcpu->arch.apicv_active)
1039 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
1040 else {
1041 kvm_lapic_set_irr(vector, apic);
1042
1043 kvm_make_request(KVM_REQ_EVENT, vcpu);
1044 kvm_vcpu_kick(vcpu);
1045 }
1046 break;
1047
1048 case APIC_DM_REMRD:
1049 result = 1;
1050 vcpu->arch.pv.pv_unhalted = 1;
1051 kvm_make_request(KVM_REQ_EVENT, vcpu);
1052 kvm_vcpu_kick(vcpu);
1053 break;
1054
1055 case APIC_DM_SMI:
1056 result = 1;
1057 kvm_make_request(KVM_REQ_SMI, vcpu);
1058 kvm_vcpu_kick(vcpu);
1059 break;
1060
1061 case APIC_DM_NMI:
1062 result = 1;
1063 kvm_inject_nmi(vcpu);
1064 kvm_vcpu_kick(vcpu);
1065 break;
1066
1067 case APIC_DM_INIT:
1068 if (!trig_mode || level) {
1069 result = 1;
1070 /* assumes that there are only KVM_APIC_INIT/SIPI */
1071 apic->pending_events = (1UL << KVM_APIC_INIT);
1072 /* make sure pending_events is visible before sending
1073 * the request */
1074 smp_wmb();
1075 kvm_make_request(KVM_REQ_EVENT, vcpu);
1076 kvm_vcpu_kick(vcpu);
1077 } else {
1078 apic_debug("Ignoring de-assert INIT to vcpu %d\n",
1079 vcpu->vcpu_id);
1080 }
1081 break;
1082
1083 case APIC_DM_STARTUP:
1084 apic_debug("SIPI to vcpu %d vector 0x%02x\n",
1085 vcpu->vcpu_id, vector);
1086 result = 1;
1087 apic->sipi_vector = vector;
1088 /* make sure sipi_vector is visible for the receiver */
1089 smp_wmb();
1090 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1091 kvm_make_request(KVM_REQ_EVENT, vcpu);
1092 kvm_vcpu_kick(vcpu);
1093 break;
1094
1095 case APIC_DM_EXTINT:
1096 /*
1097 * Should only be called by kvm_apic_local_deliver() with LVT0,
1098 * before NMI watchdog was enabled. Already handled by
1099 * kvm_apic_accept_pic_intr().
1100 */
1101 break;
1102
1103 default:
1104 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1105 delivery_mode);
1106 break;
1107 }
1108 return result;
1109 }
1110
1111 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1112 {
1113 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1114 }
1115
1116 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1117 {
1118 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1119 }
1120
1121 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1122 {
1123 int trigger_mode;
1124
1125 /* Eoi the ioapic only if the ioapic doesn't own the vector. */
1126 if (!kvm_ioapic_handles_vector(apic, vector))
1127 return;
1128
1129 /* Request a KVM exit to inform the userspace IOAPIC. */
1130 if (irqchip_split(apic->vcpu->kvm)) {
1131 apic->vcpu->arch.pending_ioapic_eoi = vector;
1132 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1133 return;
1134 }
1135
1136 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1137 trigger_mode = IOAPIC_LEVEL_TRIG;
1138 else
1139 trigger_mode = IOAPIC_EDGE_TRIG;
1140
1141 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1142 }
1143
1144 static int apic_set_eoi(struct kvm_lapic *apic)
1145 {
1146 int vector = apic_find_highest_isr(apic);
1147
1148 trace_kvm_eoi(apic, vector);
1149
1150 /*
1151 * Not every EOI write has a corresponding ISR bit set;
1152 * one example is when the kernel checks the timer in setup_IO_APIC()
1153 */
1154 if (vector == -1)
1155 return vector;
1156
1157 apic_clear_isr(vector, apic);
1158 apic_update_ppr(apic);
1159
1160 if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
1161 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1162
1163 kvm_ioapic_send_eoi(apic, vector);
1164 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1165 return vector;
1166 }
1167
1168 /*
1169 * this interface assumes a trap-like exit, which has already finished
1170 * desired side effect including vISR and vPPR update.
1171 */
1172 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1173 {
1174 struct kvm_lapic *apic = vcpu->arch.apic;
1175
1176 trace_kvm_eoi(apic, vector);
1177
1178 kvm_ioapic_send_eoi(apic, vector);
1179 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1180 }
1181 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1182
1183 static void apic_send_ipi(struct kvm_lapic *apic)
1184 {
1185 u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
1186 u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
1187 struct kvm_lapic_irq irq;
1188
1189 irq.vector = icr_low & APIC_VECTOR_MASK;
1190 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1191 irq.dest_mode = icr_low & APIC_DEST_MASK;
1192 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1193 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1194 irq.shorthand = icr_low & APIC_SHORT_MASK;
1195 irq.msi_redir_hint = false;
1196 if (apic_x2apic_mode(apic))
1197 irq.dest_id = icr_high;
1198 else
1199 irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1200
1201 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1202
1203 apic_debug("icr_high 0x%x, icr_low 0x%x, "
1204 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
1205 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
1206 "msi_redir_hint 0x%x\n",
1207 icr_high, icr_low, irq.shorthand, irq.dest_id,
1208 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
1209 irq.vector, irq.msi_redir_hint);
1210
1211 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1212 }
1213
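/*
 * Current count = time remaining until target_expiration, converted back
 * to timer ticks (bus cycles times the divide count), modulo the period.
 */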
1214 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1215 {
1216 ktime_t remaining, now;
1217 s64 ns;
1218 u32 tmcct;
1219
1220 ASSERT(apic != NULL);
1221
1222 /* if initial count is 0, current count should also be 0 */
1223 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1224 apic->lapic_timer.period == 0)
1225 return 0;
1226
1227 now = ktime_get();
1228 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1229 if (ktime_to_ns(remaining) < 0)
1230 remaining = 0;
1231
1232 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1233 tmcct = div64_u64(ns,
1234 (APIC_BUS_CYCLE_NS * apic->divide_count));
1235
1236 return tmcct;
1237 }
1238
1239 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1240 {
1241 struct kvm_vcpu *vcpu = apic->vcpu;
1242 struct kvm_run *run = vcpu->run;
1243
1244 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1245 run->tpr_access.rip = kvm_rip_read(vcpu);
1246 run->tpr_access.is_write = write;
1247 }
1248
1249 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1250 {
1251 if (apic->vcpu->arch.tpr_access_reporting)
1252 __report_tpr_access(apic, write);
1253 }
1254
1255 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1256 {
1257 u32 val = 0;
1258
1259 if (offset >= LAPIC_MMIO_LENGTH)
1260 return 0;
1261
1262 switch (offset) {
1263 case APIC_ARBPRI:
1264 apic_debug("Access APIC ARBPRI register which is for P6\n");
1265 break;
1266
1267 case APIC_TMCCT: /* Timer CCR */
1268 if (apic_lvtt_tscdeadline(apic))
1269 return 0;
1270
1271 val = apic_get_tmcct(apic);
1272 break;
1273 case APIC_PROCPRI:
1274 apic_update_ppr(apic);
1275 val = kvm_lapic_get_reg(apic, offset);
1276 break;
1277 case APIC_TASKPRI:
1278 report_tpr_access(apic, false);
1279 /* fall thru */
1280 default:
1281 val = kvm_lapic_get_reg(apic, offset);
1282 break;
1283 }
1284
1285 return val;
1286 }
1287
1288 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1289 {
1290 return container_of(dev, struct kvm_lapic, dev);
1291 }
1292
1293 int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1294 void *data)
1295 {
1296 unsigned char alignment = offset & 0xf;
1297 u32 result;
1298 /* this bitmask has a bit cleared for each reserved register */
1299 static const u64 rmask = 0x43ff01ffffffe70cULL;
1300
1301 if ((alignment + len) > 4) {
1302 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
1303 offset, len);
1304 return 1;
1305 }
1306
1307 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
1308 apic_debug("KVM_APIC_READ: read reserved register %x\n",
1309 offset);
1310 return 1;
1311 }
1312
1313 result = __apic_read(apic, offset & ~0xf);
1314
1315 trace_kvm_apic_read(offset, result);
1316
1317 switch (len) {
1318 case 1:
1319 case 2:
1320 case 4:
1321 memcpy(data, (char *)&result + alignment, len);
1322 break;
1323 default:
1324 printk(KERN_ERR "Local APIC read with len = %x, "
1325 "should be 1,2, or 4 instead\n", len);
1326 break;
1327 }
1328 return 0;
1329 }
1330 EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1331
1332 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1333 {
1334 return kvm_apic_hw_enabled(apic) &&
1335 addr >= apic->base_address &&
1336 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1337 }
1338
1339 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1340 gpa_t address, int len, void *data)
1341 {
1342 struct kvm_lapic *apic = to_lapic(this);
1343 u32 offset = address - apic->base_address;
1344
1345 if (!apic_mmio_in_range(apic, address))
1346 return -EOPNOTSUPP;
1347
1348 kvm_lapic_reg_read(apic, offset, len, data);
1349
1350 return 0;
1351 }
1352
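/*
 * The divide configuration register uses bits 0, 1 and 3; a value of b
 * selects a divisor of 2^(b + 1), with 0b111 meaning divide by 1.
 */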
1353 static void update_divide_count(struct kvm_lapic *apic)
1354 {
1355 u32 tmp1, tmp2, tdcr;
1356
1357 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1358 tmp1 = tdcr & 0xf;
1359 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1360 apic->divide_count = 0x1 << (tmp2 & 0x7);
1361
1362 apic_debug("timer divide count is 0x%x\n",
1363 apic->divide_count);
1364 }
1365
1366 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1367 {
1368 /*
1369 * Do not allow the guest to program periodic timers with a small
1370 * interval, since the hrtimers are not throttled by the host
1371 * scheduler.
1372 */
1373 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1374 s64 min_period = min_timer_period_us * 1000LL;
1375
1376 if (apic->lapic_timer.period < min_period) {
1377 pr_info_ratelimited(
1378 "kvm: vcpu %i: requested %lld ns "
1379 "lapic timer period limited to %lld ns\n",
1380 apic->vcpu->vcpu_id,
1381 apic->lapic_timer.period, min_period);
1382 apic->lapic_timer.period = min_period;
1383 }
1384 }
1385 }
1386
1387 static void apic_update_lvtt(struct kvm_lapic *apic)
1388 {
1389 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1390 apic->lapic_timer.timer_mode_mask;
1391
1392 if (apic->lapic_timer.timer_mode != timer_mode) {
1393 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1394 APIC_LVT_TIMER_TSCDEADLINE)) {
1395 hrtimer_cancel(&apic->lapic_timer.timer);
1396 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1397 apic->lapic_timer.period = 0;
1398 apic->lapic_timer.tscdeadline = 0;
1399 }
1400 apic->lapic_timer.timer_mode = timer_mode;
1401 limit_periodic_timer_frequency(apic);
1402 }
1403 }
1404
1405 static void apic_timer_expired(struct kvm_lapic *apic)
1406 {
1407 struct kvm_vcpu *vcpu = apic->vcpu;
1408 struct swait_queue_head *q = &vcpu->wq;
1409 struct kvm_timer *ktimer = &apic->lapic_timer;
1410
1411 if (atomic_read(&apic->lapic_timer.pending))
1412 return;
1413
1414 atomic_inc(&apic->lapic_timer.pending);
1415 kvm_set_pending_timer(vcpu);
1416
1417 /*
1418 * For x86, the atomic_inc() is serialized, thus
1419 * using swait_active() is safe.
1420 */
1421 if (swait_active(q))
1422 swake_up_one(q);
1423
1424 if (apic_lvtt_tscdeadline(apic))
1425 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1426 }
1427
1428 /*
1429 * On APICv, this test will cause a busy wait
1430 * during a higher-priority task.
1431 */
1432
1433 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1434 {
1435 struct kvm_lapic *apic = vcpu->arch.apic;
1436 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1437
1438 if (kvm_apic_hw_enabled(apic)) {
1439 int vec = reg & APIC_VECTOR_MASK;
1440 void *bitmap = apic->regs + APIC_ISR;
1441
1442 if (vcpu->arch.apicv_active)
1443 bitmap = apic->regs + APIC_IRR;
1444
1445 if (apic_test_vector(vec, bitmap))
1446 return true;
1447 }
1448 return false;
1449 }
1450
1451 void wait_lapic_expire(struct kvm_vcpu *vcpu)
1452 {
1453 struct kvm_lapic *apic = vcpu->arch.apic;
1454 u64 guest_tsc, tsc_deadline;
1455
1456 if (!lapic_in_kernel(vcpu))
1457 return;
1458
1459 if (apic->lapic_timer.expired_tscdeadline == 0)
1460 return;
1461
1462 if (!lapic_timer_int_injected(vcpu))
1463 return;
1464
1465 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1466 apic->lapic_timer.expired_tscdeadline = 0;
1467 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1468 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1469
1470 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
1471 if (guest_tsc < tsc_deadline)
1472 __delay(min(tsc_deadline - guest_tsc,
1473 nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
1474 }
1475
1476 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1477 {
1478 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
1479 u64 ns = 0;
1480 ktime_t expire;
1481 struct kvm_vcpu *vcpu = apic->vcpu;
1482 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1483 unsigned long flags;
1484 ktime_t now;
1485
1486 if (unlikely(!tscdeadline || !this_tsc_khz))
1487 return;
1488
1489 local_irq_save(flags);
1490
1491 now = ktime_get();
1492 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1493 if (likely(tscdeadline > guest_tsc)) {
1494 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1495 do_div(ns, this_tsc_khz);
1496 expire = ktime_add_ns(now, ns);
1497 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1498 hrtimer_start(&apic->lapic_timer.timer,
1499 expire, HRTIMER_MODE_ABS_PINNED);
1500 } else
1501 apic_timer_expired(apic);
1502
1503 local_irq_restore(flags);
1504 }
1505
1506 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1507 {
1508 ktime_t now, remaining;
1509 u64 ns_remaining_old, ns_remaining_new;
1510
1511 apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1512 * APIC_BUS_CYCLE_NS * apic->divide_count;
1513 limit_periodic_timer_frequency(apic);
1514
1515 now = ktime_get();
1516 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1517 if (ktime_to_ns(remaining) < 0)
1518 remaining = 0;
1519
1520 ns_remaining_old = ktime_to_ns(remaining);
1521 ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1522 apic->divide_count, old_divisor);
1523
1524 apic->lapic_timer.tscdeadline +=
1525 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1526 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1527 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1528 }
1529
1530 static bool set_target_expiration(struct kvm_lapic *apic)
1531 {
1532 ktime_t now;
1533 u64 tscl = rdtsc();
1534
1535 now = ktime_get();
1536 apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1537 * APIC_BUS_CYCLE_NS * apic->divide_count;
1538
1539 if (!apic->lapic_timer.period) {
1540 apic->lapic_timer.tscdeadline = 0;
1541 return false;
1542 }
1543
1544 limit_periodic_timer_frequency(apic);
1545
1546 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1547 PRIx64 ", "
1548 "timer initial count 0x%x, period %lldns, "
1549 "expire @ 0x%016" PRIx64 ".\n", __func__,
1550 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
1551 kvm_lapic_get_reg(apic, APIC_TMICT),
1552 apic->lapic_timer.period,
1553 ktime_to_ns(ktime_add_ns(now,
1554 apic->lapic_timer.period)));
1555
1556 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1557 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
1558 apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
1559
1560 return true;
1561 }
1562
1563 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1564 {
1565 ktime_t now = ktime_get();
1566 u64 tscl = rdtsc();
1567 ktime_t delta;
1568
1569 /*
1570 * Synchronize both deadlines to the same time source or
1571 * differences in the periods (caused by differences in the
1572 * underlying clocks or numerical approximation errors) will
1573 * cause the two to drift apart over time as the errors
1574 * accumulate.
1575 */
1576 apic->lapic_timer.target_expiration =
1577 ktime_add_ns(apic->lapic_timer.target_expiration,
1578 apic->lapic_timer.period);
1579 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1580 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1581 nsec_to_cycles(apic->vcpu, delta);
1582 }
1583
1584 static void start_sw_period(struct kvm_lapic *apic)
1585 {
1586 if (!apic->lapic_timer.period)
1587 return;
1588
1589 if (ktime_after(ktime_get(),
1590 apic->lapic_timer.target_expiration)) {
1591 apic_timer_expired(apic);
1592
1593 if (apic_lvtt_oneshot(apic))
1594 return;
1595
1596 advance_periodic_target_expiration(apic);
1597 }
1598
1599 hrtimer_start(&apic->lapic_timer.timer,
1600 apic->lapic_timer.target_expiration,
1601 HRTIMER_MODE_ABS_PINNED);
1602 }
1603
1604 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1605 {
1606 if (!lapic_in_kernel(vcpu))
1607 return false;
1608
1609 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1610 }
1611 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1612
1613 static void cancel_hv_timer(struct kvm_lapic *apic)
1614 {
1615 WARN_ON(preemptible());
1616 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1617 kvm_x86_ops->cancel_hv_timer(apic->vcpu);
1618 apic->lapic_timer.hv_timer_in_use = false;
1619 }
1620
1621 static bool start_hv_timer(struct kvm_lapic *apic)
1622 {
1623 struct kvm_timer *ktimer = &apic->lapic_timer;
1624 int r;
1625
1626 WARN_ON(preemptible());
1627 if (!kvm_x86_ops->set_hv_timer)
1628 return false;
1629
1630 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1631 return false;
1632
1633 if (!ktimer->tscdeadline)
1634 return false;
1635
1636 r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
1637 if (r < 0)
1638 return false;
1639
1640 ktimer->hv_timer_in_use = true;
1641 hrtimer_cancel(&ktimer->timer);
1642
1643 /*
1644 * Also recheck ktimer->pending, in case the sw timer triggered in
1645 * the window. For periodic timer, leave the hv timer running for
1646 * simplicity, and the deadline will be recomputed on the next vmexit.
1647 */
1648 if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
1649 if (r)
1650 apic_timer_expired(apic);
1651 return false;
1652 }
1653
1654 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
1655 return true;
1656 }
1657
1658 static void start_sw_timer(struct kvm_lapic *apic)
1659 {
1660 struct kvm_timer *ktimer = &apic->lapic_timer;
1661
1662 WARN_ON(preemptible());
1663 if (apic->lapic_timer.hv_timer_in_use)
1664 cancel_hv_timer(apic);
1665 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1666 return;
1667
1668 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1669 start_sw_period(apic);
1670 else if (apic_lvtt_tscdeadline(apic))
1671 start_sw_tscdeadline(apic);
1672 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1673 }
1674
1675 static void restart_apic_timer(struct kvm_lapic *apic)
1676 {
1677 preempt_disable();
1678 if (!start_hv_timer(apic))
1679 start_sw_timer(apic);
1680 preempt_enable();
1681 }
1682
1683 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1684 {
1685 struct kvm_lapic *apic = vcpu->arch.apic;
1686
1687 preempt_disable();
1688 /* If the preempt notifier has already run, it also called apic_timer_expired */
1689 if (!apic->lapic_timer.hv_timer_in_use)
1690 goto out;
1691 WARN_ON(swait_active(&vcpu->wq));
1692 cancel_hv_timer(apic);
1693 apic_timer_expired(apic);
1694
1695 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1696 advance_periodic_target_expiration(apic);
1697 restart_apic_timer(apic);
1698 }
1699 out:
1700 preempt_enable();
1701 }
1702 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1703
1704 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1705 {
1706 restart_apic_timer(vcpu->arch.apic);
1707 }
1708 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1709
1710 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1711 {
1712 struct kvm_lapic *apic = vcpu->arch.apic;
1713
1714 preempt_disable();
1715 /* Possibly the TSC deadline timer is not enabled yet */
1716 if (apic->lapic_timer.hv_timer_in_use)
1717 start_sw_timer(apic);
1718 preempt_enable();
1719 }
1720 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1721
1722 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1723 {
1724 struct kvm_lapic *apic = vcpu->arch.apic;
1725
1726 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1727 restart_apic_timer(apic);
1728 }
1729
1730 static void start_apic_timer(struct kvm_lapic *apic)
1731 {
1732 atomic_set(&apic->lapic_timer.pending, 0);
1733
1734 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1735 && !set_target_expiration(apic))
1736 return;
1737
1738 restart_apic_timer(apic);
1739 }
1740
1741 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1742 {
1743 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1744
1745 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1746 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1747 if (lvt0_in_nmi_mode) {
1748 apic_debug("Receive NMI setting on APIC_LVT0 "
1749 "for cpu %d\n", apic->vcpu->vcpu_id);
1750 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1751 } else
1752 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1753 }
1754 }
1755
1756 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1757 {
1758 int ret = 0;
1759
1760 trace_kvm_apic_write(reg, val);
1761
1762 switch (reg) {
1763 case APIC_ID: /* Local APIC ID */
1764 if (!apic_x2apic_mode(apic))
1765 kvm_apic_set_xapic_id(apic, val >> 24);
1766 else
1767 ret = 1;
1768 break;
1769
1770 case APIC_TASKPRI:
1771 report_tpr_access(apic, true);
1772 apic_set_tpr(apic, val & 0xff);
1773 break;
1774
1775 case APIC_EOI:
1776 apic_set_eoi(apic);
1777 break;
1778
1779 case APIC_LDR:
1780 if (!apic_x2apic_mode(apic))
1781 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
1782 else
1783 ret = 1;
1784 break;
1785
1786 case APIC_DFR:
1787 if (!apic_x2apic_mode(apic)) {
1788 kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
1789 recalculate_apic_map(apic->vcpu->kvm);
1790 } else
1791 ret = 1;
1792 break;
1793
1794 case APIC_SPIV: {
1795 u32 mask = 0x3ff;
1796 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
1797 mask |= APIC_SPIV_DIRECTED_EOI;
1798 apic_set_spiv(apic, val & mask);
1799 if (!(val & APIC_SPIV_APIC_ENABLED)) {
1800 int i;
1801 u32 lvt_val;
1802
1803 for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
1804 lvt_val = kvm_lapic_get_reg(apic,
1805 APIC_LVTT + 0x10 * i);
1806 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
1807 lvt_val | APIC_LVT_MASKED);
1808 }
1809 apic_update_lvtt(apic);
1810 atomic_set(&apic->lapic_timer.pending, 0);
1811
1812 }
1813 break;
1814 }
1815 case APIC_ICR:
1816 /* No delay here, so we always clear the pending bit */
1817 kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
1818 apic_send_ipi(apic);
1819 break;
1820
1821 case APIC_ICR2:
1822 if (!apic_x2apic_mode(apic))
1823 val &= 0xff000000;
1824 kvm_lapic_set_reg(apic, APIC_ICR2, val);
1825 break;
1826
1827 case APIC_LVT0:
1828 apic_manage_nmi_watchdog(apic, val);
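/* fall through */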
1829 case APIC_LVTTHMR:
1830 case APIC_LVTPC:
1831 case APIC_LVT1:
1832 case APIC_LVTERR:
1833 /* TODO: Check vector */
1834 if (!kvm_apic_sw_enabled(apic))
1835 val |= APIC_LVT_MASKED;
1836
1837 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
1838 kvm_lapic_set_reg(apic, reg, val);
1839
1840 break;
1841
1842 case APIC_LVTT:
1843 if (!kvm_apic_sw_enabled(apic))
1844 val |= APIC_LVT_MASKED;
1845 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
1846 kvm_lapic_set_reg(apic, APIC_LVTT, val);
1847 apic_update_lvtt(apic);
1848 break;
1849
1850 case APIC_TMICT:
1851 if (apic_lvtt_tscdeadline(apic))
1852 break;
1853
1854 hrtimer_cancel(&apic->lapic_timer.timer);
1855 kvm_lapic_set_reg(apic, APIC_TMICT, val);
1856 start_apic_timer(apic);
1857 break;
1858
1859 case APIC_TDCR: {
1860 uint32_t old_divisor = apic->divide_count;
1861
1862 if (val & 4)
1863 apic_debug("KVM_WRITE:TDCR %x\n", val);
1864 kvm_lapic_set_reg(apic, APIC_TDCR, val);
1865 update_divide_count(apic);
1866 if (apic->divide_count != old_divisor &&
1867 apic->lapic_timer.period) {
1868 hrtimer_cancel(&apic->lapic_timer.timer);
1869 update_target_expiration(apic, old_divisor);
1870 restart_apic_timer(apic);
1871 }
1872 break;
1873 }
1874 case APIC_ESR:
1875 if (apic_x2apic_mode(apic) && val != 0) {
1876 apic_debug("KVM_WRITE:ESR not zero %x\n", val);
1877 ret = 1;
1878 }
1879 break;
1880
1881 case APIC_SELF_IPI:
1882 if (apic_x2apic_mode(apic)) {
1883 kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
1884 } else
1885 ret = 1;
1886 break;
1887 default:
1888 ret = 1;
1889 break;
1890 }
1891 if (ret)
1892 apic_debug("Local APIC Write to read-only register %x\n", reg);
1893 return ret;
1894 }
1895 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
1896
1897 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1898 gpa_t address, int len, const void *data)
1899 {
1900 struct kvm_lapic *apic = to_lapic(this);
1901 unsigned int offset = address - apic->base_address;
1902 u32 val;
1903
1904 if (!apic_mmio_in_range(apic, address))
1905 return -EOPNOTSUPP;
1906
1907 /*
1908 * APIC registers must be aligned on a 128-bit boundary.
1909 * 32/64/128-bit registers must be accessed through 32-bit accesses.
1910 * Refer to SDM 8.4.1.
1911 */
1912 if (len != 4 || (offset & 0xf)) {
1913 /* Don't shout loud, $infamous_os would cause only noise. */
1914 apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
1915 return 0;
1916 }
1917
1918 val = *(u32*)data;
1919
1920 /* too common printing */
1921 if (offset != APIC_EOI)
1922 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
1923 "0x%x\n", __func__, offset, len, val);
1924
1925 kvm_lapic_reg_write(apic, offset & 0xff0, val);
1926
1927 return 0;
1928 }
1929
1930 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
1931 {
1932 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
1933 }
1934 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
1935
1936 /* emulate APIC access in a trap manner */
1937 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
1938 {
1939 u32 val = 0;
1940
1941 /* hw has done the conditional check and inst decode */
1942 offset &= 0xff0;
1943
1944 kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
1945
1946 /* TODO: optimize to just emulate side effect w/o one more write */
1947 kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
1948 }
1949 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
1950
1951 void kvm_free_lapic(struct kvm_vcpu *vcpu)
1952 {
1953 struct kvm_lapic *apic = vcpu->arch.apic;
1954
1955 if (!vcpu->arch.apic)
1956 return;
1957
1958 hrtimer_cancel(&apic->lapic_timer.timer);
1959
1960 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
1961 static_key_slow_dec_deferred(&apic_hw_disabled);
1962
1963 if (!apic->sw_enabled)
1964 static_key_slow_dec_deferred(&apic_sw_disabled);
1965
1966 if (apic->regs)
1967 free_page((unsigned long)apic->regs);
1968
1969 kfree(apic);
1970 }
1971
1972 /*
1973 *----------------------------------------------------------------------
1974 * LAPIC interface
1975 *----------------------------------------------------------------------
1976 */
1977 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
1978 {
1979 struct kvm_lapic *apic = vcpu->arch.apic;
1980
1981 if (!lapic_in_kernel(vcpu) ||
1982 !apic_lvtt_tscdeadline(apic))
1983 return 0;
1984
1985 return apic->lapic_timer.tscdeadline;
1986 }
1987
1988 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
1989 {
1990 struct kvm_lapic *apic = vcpu->arch.apic;
1991
1992 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1993 apic_lvtt_period(apic))
1994 return;
1995
1996 hrtimer_cancel(&apic->lapic_timer.timer);
1997 apic->lapic_timer.tscdeadline = data;
1998 start_apic_timer(apic);
1999 }
2000
2001 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2002 {
2003 struct kvm_lapic *apic = vcpu->arch.apic;
2004
2005 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2006 | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2007 }
2008
2009 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2010 {
2011 u64 tpr;
2012
2013 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2014
2015 return (tpr & 0xf0) >> 4;
2016 }
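
/*
 * Illustrative only, not part of the original file: CR8 carries bits 7:4 of
 * the task priority, so the two helpers above translate, for example,
 *
 *	cr8 = 0x3;
 *	tpr = (cr8 & 0x0f) << 4;	// 0x30, stored in APIC_TASKPRI
 *	cr8 = (tpr & 0xf0) >> 4;	// 0x3 again on the way back
 *
 * i.e. CR8 and TPR[7:4] are the same value seen through two registers.
 */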
2017
2018 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2019 {
2020 u64 old_value = vcpu->arch.apic_base;
2021 struct kvm_lapic *apic = vcpu->arch.apic;
2022
2023 if (!apic)
2024 value |= MSR_IA32_APICBASE_BSP;
2025
2026 vcpu->arch.apic_base = value;
2027
2028 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2029 kvm_update_cpuid(vcpu);
2030
2031 if (!apic)
2032 return;
2033
2034 /* update jump label if enable bit changes */
2035 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2036 if (value & MSR_IA32_APICBASE_ENABLE) {
2037 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2038 static_key_slow_dec_deferred(&apic_hw_disabled);
2039 } else {
2040 static_key_slow_inc(&apic_hw_disabled.key);
2041 recalculate_apic_map(vcpu->kvm);
2042 }
2043 }
2044
2045 if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2046 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2047
2048 if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2049 kvm_x86_ops->set_virtual_apic_mode(vcpu);
2050
2051 apic->base_address = apic->vcpu->arch.apic_base &
2052 MSR_IA32_APICBASE_BASE;
2053
2054 if ((value & MSR_IA32_APICBASE_ENABLE) &&
2055 apic->base_address != APIC_DEFAULT_PHYS_BASE)
2056 pr_warn_once("APIC base relocation is unsupported by KVM");
2057
2058 /* with FSB interrupt delivery, APIC functionality can be restarted */
2059 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
2060 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
2061
2062 }
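
/*
 * Illustrative sketch, not part of the original file: the IA32_APICBASE bits
 * the function above reacts to are
 *
 *	value = APIC_DEFAULT_PHYS_BASE       (bits 12+, MMIO base 0xfee00000)
 *	      | MSR_IA32_APICBASE_ENABLE     (bit 11, xAPIC globally enabled)
 *	      | X2APIC_ENABLE;               (bit 10, x2APIC mode)
 *
 * i.e. 0xfee00c00; the boot CPU additionally has MSR_IA32_APICBASE_BSP
 * (bit 8) set.  Any base other than APIC_DEFAULT_PHYS_BASE triggers the
 * "relocation is unsupported" warning above.
 */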
2063
2064 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2065 {
2066 struct kvm_lapic *apic = vcpu->arch.apic;
2067 int i;
2068
2069 if (!apic)
2070 return;
2071
2072 apic_debug("%s\n", __func__);
2073
2074 /* Stop the timer in case it's a reset to an active apic */
2075 hrtimer_cancel(&apic->lapic_timer.timer);
2076
2077 if (!init_event) {
2078 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2079 MSR_IA32_APICBASE_ENABLE);
2080 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2081 }
2082 kvm_apic_set_version(apic->vcpu);
2083
2084 for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2085 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2086 apic_update_lvtt(apic);
2087 if (kvm_vcpu_is_reset_bsp(vcpu) &&
2088 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2089 kvm_lapic_set_reg(apic, APIC_LVT0,
2090 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2091 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2092
2093 kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
2094 apic_set_spiv(apic, 0xff);
2095 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2096 if (!apic_x2apic_mode(apic))
2097 kvm_apic_set_ldr(apic, 0);
2098 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2099 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2100 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2101 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2102 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2103 for (i = 0; i < 8; i++) {
2104 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2105 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2106 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2107 }
2108 apic->irr_pending = vcpu->arch.apicv_active;
2109 apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
2110 apic->highest_isr_cache = -1;
2111 update_divide_count(apic);
2112 atomic_set(&apic->lapic_timer.pending, 0);
2113 if (kvm_vcpu_is_bsp(vcpu))
2114 kvm_lapic_set_base(vcpu,
2115 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2116 vcpu->arch.pv_eoi.msr_val = 0;
2117 apic_update_ppr(apic);
2118 if (vcpu->arch.apicv_active) {
2119 kvm_x86_ops->apicv_post_state_restore(vcpu);
2120 kvm_x86_ops->hwapic_irr_update(vcpu, -1);
2121 kvm_x86_ops->hwapic_isr_update(vcpu, -1);
2122 }
2123
2124 vcpu->arch.apic_arb_prio = 0;
2125 vcpu->arch.apic_attention = 0;
2126
2127 apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
2128 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
2129 vcpu, kvm_lapic_get_reg(apic, APIC_ID),
2130 vcpu->arch.apic_base, apic->base_address);
2131 }
2132
2133 /*
2134 *----------------------------------------------------------------------
2135 * timer interface
2136 *----------------------------------------------------------------------
2137 */
2138
2139 static bool lapic_is_periodic(struct kvm_lapic *apic)
2140 {
2141 return apic_lvtt_period(apic);
2142 }
2143
2144 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2145 {
2146 struct kvm_lapic *apic = vcpu->arch.apic;
2147
2148 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2149 return atomic_read(&apic->lapic_timer.pending);
2150
2151 return 0;
2152 }
2153
2154 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2155 {
2156 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2157 int vector, mode, trig_mode;
2158
2159 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2160 vector = reg & APIC_VECTOR_MASK;
2161 mode = reg & APIC_MODE_MASK;
2162 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2163 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2164 NULL);
2165 }
2166 return 0;
2167 }
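
/*
 * Illustrative only, not part of the original file: the LVT fields pulled
 * apart above sit in the register as
 *
 *	vector    = reg & APIC_VECTOR_MASK;       // bits 7:0
 *	mode      = reg & APIC_MODE_MASK;         // bits 10:8, delivery mode
 *	trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; // bit 15
 *
 * e.g. an unmasked LVT0 value of 0x700 delivers ExtINT, while 0x400
 * delivers NMI.
 */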
2168
2169 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2170 {
2171 struct kvm_lapic *apic = vcpu->arch.apic;
2172
2173 if (apic)
2174 kvm_apic_local_deliver(apic, APIC_LVT0);
2175 }
2176
2177 static const struct kvm_io_device_ops apic_mmio_ops = {
2178 .read = apic_mmio_read,
2179 .write = apic_mmio_write,
2180 };
2181
2182 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2183 {
2184 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2185 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2186
2187 apic_timer_expired(apic);
2188
2189 if (lapic_is_periodic(apic)) {
2190 advance_periodic_target_expiration(apic);
2191 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2192 return HRTIMER_RESTART;
2193 } else
2194 return HRTIMER_NORESTART;
2195 }
2196
2197 int kvm_create_lapic(struct kvm_vcpu *vcpu)
2198 {
2199 struct kvm_lapic *apic;
2200
2201 ASSERT(vcpu != NULL);
2202 apic_debug("apic_init %d\n", vcpu->vcpu_id);
2203
2204 apic = kzalloc(sizeof(*apic), GFP_KERNEL);
2205 if (!apic)
2206 goto nomem;
2207
2208 vcpu->arch.apic = apic;
2209
2210 apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
2211 if (!apic->regs) {
2212 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2213 vcpu->vcpu_id);
2214 goto nomem_free_apic;
2215 }
2216 apic->vcpu = vcpu;
2217
2218 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2219 HRTIMER_MODE_ABS_PINNED);
2220 apic->lapic_timer.timer.function = apic_timer_fn;
2221
2222 /*
2223 * APIC is created enabled. This will prevent kvm_lapic_set_base from
2224 * thinking that APIC state has changed.
2225 */
2226 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2227 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2228 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2229
2230 return 0;
2231 nomem_free_apic:
2232 kfree(apic);
2233 nomem:
2234 return -ENOMEM;
2235 }
2236
2237 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2238 {
2239 struct kvm_lapic *apic = vcpu->arch.apic;
2240 u32 ppr;
2241
2242 if (!apic_enabled(apic))
2243 return -1;
2244
2245 __apic_update_ppr(apic, &ppr);
2246 return apic_has_interrupt_for_ppr(apic, ppr);
2247 }
2248
2249 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2250 {
2251 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2252 int r = 0;
2253
2254 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2255 r = 1;
2256 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2257 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2258 r = 1;
2259 return r;
2260 }
2261
2262 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2263 {
2264 struct kvm_lapic *apic = vcpu->arch.apic;
2265
2266 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2267 kvm_apic_local_deliver(apic, APIC_LVTT);
2268 if (apic_lvtt_tscdeadline(apic))
2269 apic->lapic_timer.tscdeadline = 0;
2270 if (apic_lvtt_oneshot(apic)) {
2271 apic->lapic_timer.tscdeadline = 0;
2272 apic->lapic_timer.target_expiration = 0;
2273 }
2274 atomic_set(&apic->lapic_timer.pending, 0);
2275 }
2276 }
2277
2278 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2279 {
2280 int vector = kvm_apic_has_interrupt(vcpu);
2281 struct kvm_lapic *apic = vcpu->arch.apic;
2282 u32 ppr;
2283
2284 if (vector == -1)
2285 return -1;
2286
2287 /*
2288 * We get here even with APIC virtualization enabled, if doing
2289 * nested virtualization and L1 runs with the "acknowledge interrupt
2290 * on exit" mode. Then we cannot inject the interrupt via RVI,
2291 * because the process would deliver it through the IDT.
2292 */
2293
2294 apic_clear_irr(vector, apic);
2295 if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
2296 /*
2297 * For auto-EOI interrupts, there might be another pending
2298 * interrupt above PPR, so check whether to raise another
2299 * KVM_REQ_EVENT.
2300 */
2301 apic_update_ppr(apic);
2302 } else {
2303 /*
2304 * For normal interrupts, PPR has been raised and there cannot
2305 * be a higher-priority pending interrupt---except if there was
2306 * a concurrent interrupt injection, but that would have
2307 * triggered KVM_REQ_EVENT already.
2308 */
2309 apic_set_isr(vector, apic);
2310 __apic_update_ppr(apic, &ppr);
2311 }
2312
2313 return vector;
2314 }
2315
2316 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2317 struct kvm_lapic_state *s, bool set)
2318 {
2319 if (apic_x2apic_mode(vcpu->arch.apic)) {
2320 u32 *id = (u32 *)(s->regs + APIC_ID);
2321 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2322
2323 if (vcpu->kvm->arch.x2apic_format) {
2324 if (*id != vcpu->vcpu_id)
2325 return -EINVAL;
2326 } else {
2327 if (set)
2328 *id >>= 24;
2329 else
2330 *id <<= 24;
2331 }
2332
2333 /* In x2APIC mode, the LDR is fixed and based on the id */
2334 if (set)
2335 *ldr = kvm_apic_calc_x2apic_ldr(*id);
2336 }
2337
2338 return 0;
2339 }
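
/*
 * Illustrative only, not part of the original file: without the
 * KVM_CAP_X2APIC_API format, userspace always sees the xAPIC layout, where
 * the APIC ID lives in bits 31:24 of the APIC_ID register, so the fixup
 * above converts, e.g. for vcpu_id == 5:
 *
 *	set: *id = 0x05000000 >> 24 == 5           (xAPIC layout -> x2APIC id)
 *	get: *id = 5 << 24         == 0x05000000   (x2APIC id -> xAPIC layout)
 *
 * and in x2APIC mode the LDR is then recomputed from the id on set.
 */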
2340
2341 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2342 {
2343 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2344 return kvm_apic_state_fixup(vcpu, s, false);
2345 }
2346
2347 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2348 {
2349 struct kvm_lapic *apic = vcpu->arch.apic;
2350 int r;
2351
2352
2353 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2354 /* set SPIV separately to get count of SW disabled APICs right */
2355 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2356
2357 r = kvm_apic_state_fixup(vcpu, s, true);
2358 if (r)
2359 return r;
2360 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2361
2362 recalculate_apic_map(vcpu->kvm);
2363 kvm_apic_set_version(vcpu);
2364
2365 apic_update_ppr(apic);
2366 hrtimer_cancel(&apic->lapic_timer.timer);
2367 apic_update_lvtt(apic);
2368 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2369 update_divide_count(apic);
2370 start_apic_timer(apic);
2371 apic->irr_pending = true;
2372 apic->isr_count = vcpu->arch.apicv_active ?
2373 1 : count_vectors(apic->regs + APIC_ISR);
2374 apic->highest_isr_cache = -1;
2375 if (vcpu->arch.apicv_active) {
2376 kvm_x86_ops->apicv_post_state_restore(vcpu);
2377 kvm_x86_ops->hwapic_irr_update(vcpu,
2378 apic_find_highest_irr(apic));
2379 kvm_x86_ops->hwapic_isr_update(vcpu,
2380 apic_find_highest_isr(apic));
2381 }
2382 kvm_make_request(KVM_REQ_EVENT, vcpu);
2383 if (ioapic_in_kernel(vcpu->kvm))
2384 kvm_rtc_eoi_tracking_restore_one(vcpu);
2385
2386 vcpu->arch.apic_arb_prio = 0;
2387
2388 return 0;
2389 }
2390
2391 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2392 {
2393 struct hrtimer *timer;
2394
2395 if (!lapic_in_kernel(vcpu))
2396 return;
2397
2398 timer = &vcpu->arch.apic->lapic_timer.timer;
2399 if (hrtimer_cancel(timer))
2400 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
2401 }
2402
2403 /*
2404 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2405 *
2406 * Detect whether guest triggered PV EOI since the
2407 * last entry. If yes, set EOI on the guest's behalf.
2408 * Clear PV EOI in guest memory in any case.
2409 */
2410 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2411 struct kvm_lapic *apic)
2412 {
2413 bool pending;
2414 int vector;
2415 /*
2416 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2417 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2418 *
2419 * KVM_APIC_PV_EOI_PENDING is unset:
2420 * -> host disabled PV EOI.
2421 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2422 * -> host enabled PV EOI, guest did not execute EOI yet.
2423 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2424 * -> host enabled PV EOI, guest executed EOI.
2425 */
2426 BUG_ON(!pv_eoi_enabled(vcpu));
2427 pending = pv_eoi_get_pending(vcpu);
2428 /*
2429 * Clear pending bit in any case: it will be set again on vmentry.
2430 * While this might not be ideal from a performance point of view,
2431 * this makes sure PV EOI is only enabled when we know it's safe.
2432 */
2433 pv_eoi_clr_pending(vcpu);
2434 if (pending)
2435 return;
2436 vector = apic_set_eoi(apic);
2437 trace_kvm_pv_eoi(apic, vector);
2438 }
2439
2440 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2441 {
2442 u32 data;
2443
2444 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2445 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2446
2447 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2448 return;
2449
2450 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2451 sizeof(u32)))
2452 return;
2453
2454 apic_set_tpr(vcpu->arch.apic, data & 0xff);
2455 }
2456
2457 /*
2458 * apic_sync_pv_eoi_to_guest - called before vmentry
2459 *
2460 * Detect whether it's safe to enable PV EOI and
2461 * if yes do so.
2462 */
2463 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2464 struct kvm_lapic *apic)
2465 {
2466 if (!pv_eoi_enabled(vcpu) ||
2467 /* IRR set or many bits in ISR: could be nested. */
2468 apic->irr_pending ||
2469 /* Cache not set: could be safe but we don't bother. */
2470 apic->highest_isr_cache == -1 ||
2471 /* Need EOI to update ioapic. */
2472 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2473 /*
2474 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2475 * so we need not do anything here.
2476 */
2477 return;
2478 }
2479
2480 pv_eoi_set_pending(apic->vcpu);
2481 }
2482
2483 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2484 {
2485 u32 data, tpr;
2486 int max_irr, max_isr;
2487 struct kvm_lapic *apic = vcpu->arch.apic;
2488
2489 apic_sync_pv_eoi_to_guest(vcpu, apic);
2490
2491 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2492 return;
2493
2494 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2495 max_irr = apic_find_highest_irr(apic);
2496 if (max_irr < 0)
2497 max_irr = 0;
2498 max_isr = apic_find_highest_isr(apic);
2499 if (max_isr < 0)
2500 max_isr = 0;
2501 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2502
2503 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2504 sizeof(u32));
2505 }
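
/*
 * Illustrative sketch, not part of the original file: the 32-bit word pushed
 * to the vAPIC page above packs TPR, ISR and IRR as
 *
 *	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 *
 * e.g. tpr == 0x20, max_isr == 0x31, max_irr == 0x41 gives
 * data == 0x41003020 (IRR vector 0x41, ISR class 0x30, TPR 0x20).
 */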
2506
2507 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2508 {
2509 if (vapic_addr) {
2510 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2511 &vcpu->arch.apic->vapic_cache,
2512 vapic_addr, sizeof(u32)))
2513 return -EINVAL;
2514 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2515 } else {
2516 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2517 }
2518
2519 vcpu->arch.apic->vapic_addr = vapic_addr;
2520 return 0;
2521 }
2522
2523 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2524 {
2525 struct kvm_lapic *apic = vcpu->arch.apic;
2526 u32 reg = (msr - APIC_BASE_MSR) << 4;
2527
2528 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2529 return 1;
2530
2531 if (reg == APIC_ICR2)
2532 return 1;
2533
2534 /* if this is an ICR write, store the high dword in ICR2 before the command */
2535 if (reg == APIC_ICR)
2536 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2537 return kvm_lapic_reg_write(apic, reg, (u32)data);
2538 }
2539
2540 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2541 {
2542 struct kvm_lapic *apic = vcpu->arch.apic;
2543 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2544
2545 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2546 return 1;
2547
2548 if (reg == APIC_DFR || reg == APIC_ICR2) {
2549 apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
2550 reg);
2551 return 1;
2552 }
2553
2554 if (kvm_lapic_reg_read(apic, reg, 4, &low))
2555 return 1;
2556 if (reg == APIC_ICR)
2557 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2558
2559 *data = (((u64)high) << 32) | low;
2560
2561 return 0;
2562 }
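
/*
 * Illustrative only, not part of the original file: x2APIC MSRs map to the
 * xAPIC register offsets as reg = (msr - APIC_BASE_MSR) << 4, e.g.
 *
 *	MSR 0x808 (TPR) -> reg 0x080 (APIC_TASKPRI)
 *	MSR 0x830 (ICR) -> reg 0x300 (APIC_ICR)
 *
 * ICR is the only 64-bit register: the helpers above split it so that the
 * high dword travels through APIC_ICR2 and the low dword through APIC_ICR,
 * while DFR and ICR2 themselves are reserved in x2APIC mode and are rejected
 * (return 1).
 */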
2563
2564 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2565 {
2566 struct kvm_lapic *apic = vcpu->arch.apic;
2567
2568 if (!lapic_in_kernel(vcpu))
2569 return 1;
2570
2571 /* if this is an ICR write, store the high dword in ICR2 before the command */
2572 if (reg == APIC_ICR)
2573 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2574 return kvm_lapic_reg_write(apic, reg, (u32)data);
2575 }
2576
2577 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2578 {
2579 struct kvm_lapic *apic = vcpu->arch.apic;
2580 u32 low, high = 0;
2581
2582 if (!lapic_in_kernel(vcpu))
2583 return 1;
2584
2585 if (kvm_lapic_reg_read(apic, reg, 4, &low))
2586 return 1;
2587 if (reg == APIC_ICR)
2588 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2589
2590 *data = (((u64)high) << 32) | low;
2591
2592 return 0;
2593 }
2594
2595 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
2596 {
2597 u64 addr = data & ~KVM_MSR_ENABLED;
2598 if (!IS_ALIGNED(addr, 4))
2599 return 1;
2600
2601 vcpu->arch.pv_eoi.msr_val = data;
2602 if (!pv_eoi_enabled(vcpu))
2603 return 0;
2604 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
2605 addr, sizeof(u8));
2606 }
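
/*
 * Illustrative sketch, not part of the original file: the PV EOI MSR value is
 * a guest physical address with the enable flag in bit 0, so a guest might
 * arm it roughly as
 *
 *	wrmsrl(MSR_KVM_PV_EOI_EN, __pa(&eoi_flag) | KVM_MSR_ENABLED);
 *
 * where eoi_flag is a 4-byte-aligned byte in guest memory; the function above
 * rejects unaligned addresses and caches the gfn->hva mapping only when the
 * enable bit is set.
 */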
2607
2608 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2609 {
2610 struct kvm_lapic *apic = vcpu->arch.apic;
2611 u8 sipi_vector;
2612 unsigned long pe;
2613
2614 if (!lapic_in_kernel(vcpu) || !apic->pending_events)
2615 return;
2616
2617 /*
2618 * INITs are latched while in SMM. Because an SMM CPU cannot
2619 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
2620 * and delay processing of INIT until the next RSM.
2621 */
2622 if (is_smm(vcpu)) {
2623 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2624 if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
2625 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2626 return;
2627 }
2628
2629 pe = xchg(&apic->pending_events, 0);
2630 if (test_bit(KVM_APIC_INIT, &pe)) {
2631 kvm_vcpu_reset(vcpu, true);
2632 if (kvm_vcpu_is_bsp(apic->vcpu))
2633 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2634 else
2635 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2636 }
2637 if (test_bit(KVM_APIC_SIPI, &pe) &&
2638 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2639 /* evaluate pending_events before reading the vector */
2640 smp_rmb();
2641 sipi_vector = apic->sipi_vector;
2642 apic_debug("vcpu %d received sipi with vector # %x\n",
2643 vcpu->vcpu_id, sipi_vector);
2644 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2645 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2646 }
2647 }
2648
2649 void kvm_lapic_init(void)
2650 {
2651 /* do not patch jump label more than once per second */
2652 jump_label_rate_limit(&apic_hw_disabled, HZ);
2653 jump_label_rate_limit(&apic_sw_disabled, HZ);
2654 }
2655
2656 void kvm_lapic_exit(void)
2657 {
2658 static_key_deferred_flush(&apic_hw_disabled);
2659 static_key_deferred_flush(&apic_sw_disabled);
2660 }