1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef ARCH_X86_KVM_CPUID_H
3 #define ARCH_X86_KVM_CPUID_H
7 #include <asm/processor.h>
8 #include <uapi/asm/kvm_para.h>
11 * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
12 * be directly used by KVM. Note, these word values conflict with the kernel's
13 * "bug" caps, but KVM doesn't use those.
15 enum kvm_only_cpuid_leafs
{
16 CPUID_12_EAX
= NCAPINTS
,
19 NKVMCAPINTS
= NR_KVM_CPU_CAPS
- NCAPINTS
,
/*
 * Pack a KVM-only feature word @w and bit number @f into the same
 * "word * 32 + bit" encoding used by the kernel's X86_FEATURE_* values.
 */
#define KVM_X86_FEATURE(w, f)		((w)*32 + (f))

/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
/* Per-word KVM capability masks; defined in cpuid.c, filled by kvm_set_cpu_caps(). */
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

/* Re-derive per-vCPU CPUID/PV state after guest-visible state changes. */
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);

/*
 * Look up the vCPU's CPUID entry for @function/@index.  Callers below guard
 * the result with NULL checks, i.e. it returns NULL when no entry exists.
 */
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
35 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2
*cpuid
,
36 struct kvm_cpuid_entry2 __user
*entries
,
/* Legacy (struct kvm_cpuid_entry) and current (entry2) vCPU CPUID ioctls. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);

/*
 * CPUID lookup with results passed in/out through the register pointers.
 * NOTE(review): exact_only semantics not visible in this header — presumably
 * it requires an exact function/index match; confirm against cpuid.c.
 */
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);
/* Return the vCPU's cached maxphyaddr value (no CPUID lookup at call time). */
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}
58 static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu
*vcpu
, gpa_t gpa
)
60 return !(gpa
& vcpu
->arch
.reserved_gpa_bits
);
/* Convenience inverse of kvm_vcpu_is_legal_gpa(). */
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}
/* True iff @gpa is both @alignment-aligned and free of reserved bits. */
static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}
/* True iff @gpa is a legal, page-aligned guest physical address. */
static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}
/*
 * Map each hardware-defined feature word back to the CPUID leaf
 * (function, index, register) that defines it.  Indexed by feature word,
 * consumed by x86_feature_cpuid() and validated by reverse_cpuid_check().
 */
static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
	[CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
};
/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	/* Linux-defined (synthetic) words have no hardware CPUID leaf. */
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	/* The word must be inside the table and have a real (non-zero) leaf. */
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
123 * Translate feature bits that are scattered in the kernel's cpufeatures word
124 * into KVM feature words that align with hardware's definitions.
126 static __always_inline u32
__feature_translate(int x86_feature
)
128 if (x86_feature
== X86_FEATURE_SGX1
)
129 return KVM_X86_FEATURE_SGX1
;
130 else if (x86_feature
== X86_FEATURE_SGX2
)
131 return KVM_X86_FEATURE_SGX2
;
/* Extract the feature word (leaf index) from a translated X86_FEATURE_* value. */
static __always_inline u32 __feature_leaf(int x86_feature)
{
	return __feature_translate(x86_feature) / 32;
}
/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	/* Remap scattered features to their KVM-only word first. */
	x86_feature = __feature_translate(x86_feature);

	/* Compile-time: the word must be a hardware-defined leaf. */
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
/* Return the (function, index, register) tuple that defines @x86_feature. */
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}
165 static __always_inline u32
*__cpuid_entry_get_reg(struct kvm_cpuid_entry2
*entry
,
/* Return a pointer to the register in @entry that holds @x86_feature. */
static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}
/* Return @x86_feature's bit masked out of @entry (0 if not set). */
static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}
/* Boolean form of cpuid_entry_get(): is @x86_feature set in @entry? */
static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}
/* Clear @x86_feature's bit in the relevant register of @entry. */
static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}
/* Set @x86_feature's bit in the relevant register of @entry. */
static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}
221 static __always_inline
void cpuid_entry_change(struct kvm_cpuid_entry2
*entry
,
222 unsigned int x86_feature
,
225 u32
*reg
= cpuid_entry_get_reg(entry
, x86_feature
);
228 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
229 * compiler into using CMOV instead of Jcc when possible.
232 *reg
|= __feature_bit(x86_feature
);
234 *reg
&= ~__feature_bit(x86_feature
);
237 static __always_inline
void cpuid_entry_override(struct kvm_cpuid_entry2
*entry
,
240 u32
*reg
= cpuid_entry_get_reg(entry
, leaf
* 32);
242 BUILD_BUG_ON(leaf
>= ARRAY_SIZE(kvm_cpu_caps
));
243 *reg
= kvm_cpu_caps
[leaf
];
246 static __always_inline u32
*guest_cpuid_get_register(struct kvm_vcpu
*vcpu
,
247 unsigned int x86_feature
)
249 const struct cpuid_reg cpuid
= x86_feature_cpuid(x86_feature
);
250 struct kvm_cpuid_entry2
*entry
;
252 entry
= kvm_find_cpuid_entry(vcpu
, cpuid
.function
, cpuid
.index
);
256 return __cpuid_entry_get_reg(entry
, cpuid
.reg
);
259 static __always_inline
bool guest_cpuid_has(struct kvm_vcpu
*vcpu
,
260 unsigned int x86_feature
)
264 reg
= guest_cpuid_get_register(vcpu
, x86_feature
);
268 return *reg
& __feature_bit(x86_feature
);
271 static __always_inline
void guest_cpuid_clear(struct kvm_vcpu
*vcpu
,
272 unsigned int x86_feature
)
276 reg
= guest_cpuid_get_register(vcpu
, x86_feature
);
278 *reg
&= ~__feature_bit(x86_feature
);
281 static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu
*vcpu
)
283 struct kvm_cpuid_entry2
*best
;
285 best
= kvm_find_cpuid_entry(vcpu
, 0, 0);
287 (is_guest_vendor_amd(best
->ebx
, best
->ecx
, best
->edx
) ||
288 is_guest_vendor_hygon(best
->ebx
, best
->ecx
, best
->edx
));
/* Does leaf 0 of the guest's CPUID identify an Intel vendor string? */
static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	/* NULL entry (no leaf 0) is treated as "not Intel". */
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}
299 static inline int guest_cpuid_family(struct kvm_vcpu
*vcpu
)
301 struct kvm_cpuid_entry2
*best
;
303 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
307 return x86_family(best
->eax
);
310 static inline int guest_cpuid_model(struct kvm_vcpu
*vcpu
)
312 struct kvm_cpuid_entry2
*best
;
314 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
318 return x86_model(best
->eax
);
321 static inline int guest_cpuid_stepping(struct kvm_vcpu
*vcpu
)
323 struct kvm_cpuid_entry2
*best
;
325 best
= kvm_find_cpuid_entry(vcpu
, 0x1, 0);
329 return x86_stepping(best
->eax
);
/* Any of these CPUID features implies the guest has IA32_SPEC_CTRL. */
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}
/* Either of these CPUID features implies the guest has IA32_PRED_CMD. */
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}
/* Test the CPUID-faulting-supported bit in the vCPU's MSR_PLATFORM_INFO shadow. */
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}
/* Has the guest enabled CPUID faulting via MSR_MISC_FEATURES_ENABLES? */
static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}
/* Clear @x86_feature from KVM's global capability masks. */
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}
/* Set @x86_feature in KVM's global capability masks. */
static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}
/* Return @x86_feature's bit masked out of KVM's capability word (0 if clear). */
static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}
381 static __always_inline
bool kvm_cpu_cap_has(unsigned int x86_feature
)
383 return !!kvm_cpu_cap_get(x86_feature
);
/* Advertise @x86_feature only if the host CPU actually supports it. */
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}
392 static __always_inline
bool guest_pv_has(struct kvm_vcpu
*vcpu
,
393 unsigned int kvm_feature
)
395 if (!vcpu
->arch
.pv_cpuid
.enforce
)
398 return vcpu
->arch
.pv_cpuid
.features
& (1u << kvm_feature
);