1 | // SPDX-License-Identifier: GPL-2.0 | |
2 | /* | |
3 | * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. | |
4 | * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. | |
5 | * | |
6 | * KVM Xen emulation | |
7 | */ | |
8 | ||
#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__

#ifdef CONFIG_KVM_XEN
#include <linux/jump_label_ratelimit.h>

/*
 * Deferred static key guarding the Xen emulation fast paths below; the
 * inline helpers bail out early unless it is enabled.
 */
extern struct static_key_false_deferred kvm_xen_enabled;

/* Out-of-line slow path for kvm_xen_has_interrupt(). */
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
/* Per-vCPU Xen attribute accessors (KVM_XEN_VCPU_{SET,GET}_ATTR). */
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
/* Per-VM Xen attribute accessors (KVM_XEN_HVM_{SET,GET}_ATTR). */
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
/* Handle a guest write to the hypercall page MSR. */
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
/* Install the per-VM Xen HVM configuration (KVM_XEN_HVM_CONFIG). */
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
27 | static inline bool kvm_xen_msr_enabled(struct kvm *kvm) | |
28 | { | |
29 | return static_branch_unlikely(&kvm_xen_enabled.key) && | |
30 | kvm->arch.xen_hvm_config.msr; | |
31 | } | |
32 | ||
33 | static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) | |
34 | { | |
35 | return static_branch_unlikely(&kvm_xen_enabled.key) && | |
36 | (kvm->arch.xen_hvm_config.flags & | |
37 | KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); | |
38 | } | |
39 | ||
40 | static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) | |
41 | { | |
42 | if (static_branch_unlikely(&kvm_xen_enabled.key) && | |
43 | vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector) | |
44 | return __kvm_xen_has_interrupt(vcpu); | |
45 | ||
46 | return 0; | |
47 | } | |
#else
/*
 * CONFIG_KVM_XEN=n: no-op stubs so callers compile without Xen support.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	/* Non-zero: the MSR write is not handled here — presumably the
	 * caller treats this as failure; confirm against the MSR path. */
	return 1;
}

static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}

static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}

static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}

static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	/* Never any Xen upcall pending without Xen support. */
	return 0;
}
#endif
77 | ||
/* Dispatch a Xen hypercall VM-exit; declared in both configurations. */
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);

#include <asm/pvclock-abi.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

/* Write @state (a RUNSTATE_* value) into the guest's runstate area. */
void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
86 | static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu) | |
87 | { | |
88 | kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running); | |
89 | } | |
90 | ||
91 | static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) | |
92 | { | |
93 | /* | |
94 | * If the vCPU wasn't preempted but took a normal exit for | |
95 | * some reason (hypercalls, I/O, etc.), that is accounted as | |
96 | * still RUNSTATE_running, as the VMM is still operating on | |
97 | * behalf of the vCPU. Only if the VMM does actually block | |
98 | * does it need to enter RUNSTATE_blocked. | |
99 | */ | |
100 | if (WARN_ON_ONCE(!vcpu->preempted)) | |
101 | return; | |
102 | ||
103 | kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); | |
104 | } | |
105 | ||
106 | /* 32-bit compatibility definitions, also used natively in 32-bit build */ | |
/* 32-bit layout of Xen's arch_vcpu_info; field order/sizes are guest ABI. */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
111 | ||
/* 32-bit layout of Xen's vcpu_info; field order/sizes are guest ABI. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
	uint16_t pad;
	uint32_t evtchn_pending_sel;
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */
120 | ||
/* 32-bit layout of Xen's arch_shared_info; field order/sizes are guest ABI. */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
130 | ||
/* 32-bit layout of Xen's shared_info page; field order/sizes are guest ABI. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];
	uint32_t evtchn_mask[32];
	struct pvclock_wall_clock wc;
	struct compat_arch_shared_info arch;
};
138 | ||
/*
 * 32-bit layout of Xen's vcpu_runstate_info.  Packed: the 32-bit ABI has
 * no padding between the int state and the 64-bit fields.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
144 | ||
#endif /* __ARCH_X86_KVM_XEN_H__ */