]>
Commit | Line | Data |
---|---|---|
00b27a3e AK |
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

/*
 * Recompute per-vcpu state derived from its CPUID entries.
 * NOTE(review): return convention (0 / -errno) is defined in cpuid.c — verify.
 */
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
/*
 * Find the guest CPUID entry for the given (function, index) leaf.
 * Returns NULL when the guest's CPUID table has no such entry (the
 * inline helpers below all NULL-check the result).
 */
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
	u32 function, u32 index);
/* KVM_GET_SUPPORTED_CPUID-style device ioctl backend; copies entries
 * to userspace. 'type' selects the ioctl variant — see cpuid.c. */
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
/* Set the vcpu's CPUID table from the legacy (struct kvm_cpuid_entry)
 * userspace layout. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
/* Set the vcpu's CPUID table from the v2 userspace layout. */
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
/* Copy the vcpu's current CPUID table back out to userspace. */
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
/* Emulate the CPUID instruction: *eax/*ecx select the leaf on entry,
 * all four registers receive the guest-visible result. */
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

/* Derive MAXPHYADDR from the guest CPUID entries (slow path; the cached
 * value is read via cpuid_maxphyaddr() below). */
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

/* Return the cached guest MAXPHYADDR (physical address width in bits). */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
00b27a3e AK |
29 | |
30 | static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) | |
31 | { | |
32 | struct kvm_cpuid_entry2 *best; | |
33 | ||
6d1068b3 | 34 | if (!static_cpu_has(X86_FEATURE_XSAVE)) |
1d804d07 | 35 | return false; |
6d1068b3 | 36 | |
00b27a3e AK |
37 | best = kvm_find_cpuid_entry(vcpu, 1, 0); |
38 | return best && (best->ecx & bit(X86_FEATURE_XSAVE)); | |
39 | } | |
40 | ||
ba904635 WA |
41 | static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) |
42 | { | |
43 | struct kvm_cpuid_entry2 *best; | |
44 | ||
45 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
46 | return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST)); | |
47 | } | |
48 | ||
00b27a3e AK |
49 | static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu) |
50 | { | |
51 | struct kvm_cpuid_entry2 *best; | |
52 | ||
53 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
54 | return best && (best->ebx & bit(X86_FEATURE_SMEP)); | |
55 | } | |
56 | ||
97ec8c06 FW |
57 | static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu) |
58 | { | |
59 | struct kvm_cpuid_entry2 *best; | |
60 | ||
61 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
62 | return best && (best->ebx & bit(X86_FEATURE_SMAP)); | |
63 | } | |
64 | ||
00b27a3e AK |
65 | static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) |
66 | { | |
67 | struct kvm_cpuid_entry2 *best; | |
68 | ||
69 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
70 | return best && (best->ebx & bit(X86_FEATURE_FSGSBASE)); | |
71 | } | |
72 | ||
660a5d51 PB |
73 | static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu) |
74 | { | |
75 | struct kvm_cpuid_entry2 *best; | |
76 | ||
77 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | |
78 | return best && (best->edx & bit(X86_FEATURE_LM)); | |
79 | } | |
80 | ||
2b036c6b BO |
81 | static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu) |
82 | { | |
83 | struct kvm_cpuid_entry2 *best; | |
84 | ||
85 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | |
86 | return best && (best->ecx & bit(X86_FEATURE_OSVW)); | |
87 | } | |
88 | ||
ad756a16 MJ |
89 | static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu) |
90 | { | |
91 | struct kvm_cpuid_entry2 *best; | |
92 | ||
93 | best = kvm_find_cpuid_entry(vcpu, 1, 0); | |
94 | return best && (best->ecx & bit(X86_FEATURE_PCID)); | |
95 | } | |
96 | ||
58cb628d JK |
97 | static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu) |
98 | { | |
99 | struct kvm_cpuid_entry2 *best; | |
100 | ||
101 | best = kvm_find_cpuid_entry(vcpu, 1, 0); | |
102 | return best && (best->ecx & bit(X86_FEATURE_X2APIC)); | |
103 | } | |
104 | ||
a0c0feb5 PB |
105 | static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu) |
106 | { | |
107 | struct kvm_cpuid_entry2 *best; | |
108 | ||
109 | best = kvm_find_cpuid_entry(vcpu, 0, 0); | |
110 | return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx; | |
111 | } | |
112 | ||
5f7dde7b NA |
113 | static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) |
114 | { | |
115 | struct kvm_cpuid_entry2 *best; | |
116 | ||
117 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | |
118 | return best && (best->edx & bit(X86_FEATURE_GBPAGES)); | |
119 | } | |
6f43ed01 NA |
120 | |
121 | static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) | |
122 | { | |
123 | struct kvm_cpuid_entry2 *best; | |
124 | ||
125 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
126 | return best && (best->ebx & bit(X86_FEATURE_RTM)); | |
127 | } | |
c447e76b LL |
128 | |
129 | static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) | |
130 | { | |
131 | struct kvm_cpuid_entry2 *best; | |
132 | ||
133 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | |
134 | return best && (best->ebx & bit(X86_FEATURE_MPX)); | |
135 | } | |
00b27a3e | 136 | #endif |