arch/x86/kernel/cpu/bugs.c (mirror_ubuntu-bionic-kernel.git, blob ab112edf4e30d689dc326d96b34035fb4a5f2354)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}
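
	/*
	 * Worked example (editor's illustration; SPEC_CTRL_IBRS, SPEC_CTRL_STIBP
	 * and SPEC_CTRL_SSBD are bits 0, 1 and 2 of MSR_IA32_SPEC_CTRL): with
	 * x86_spec_ctrl_base = 0x1, x86_spec_ctrl_mask = 0x7 and
	 * guest_spec_ctrl = 0x4, the block above computes
	 * guestval = (0x1 & ~0x7) | (0x4 & 0x7) = 0x4, and the MSR is only
	 * rewritten because hostval (0x1, ignoring the TIF-derived SSBD/STIBP
	 * bits) differs from guestval.
	 */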

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
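
/*
 * Example (editor's illustration of the parameter parsed above): booting
 * with "mds=full,nosmt" selects MDS_MITIGATION_FULL and additionally
 * requests SMT to be disabled, while "mds=off" leaves the CPU vulnerable.
 */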

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
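
/*
 * Editor's illustration of the helper above: for the command line value
 * "retpoline,generic" (arglen == 17), match_option(arg, 17, "retpoline")
 * is false because the lengths differ, while match_option("on", 2, "on")
 * is true; the length check keeps prefixes from matching accidentally.
 */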

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initdata = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
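
/*
 * Example (editor's illustration): "spectre_v2_user=seccomp,ibpb" keeps
 * STIBP opt-in via prctl()/seccomp but makes the IBPB flush on context
 * switch unconditional, while "spectre_v2_user=off" disables both. The
 * strings are parsed by spectre_v2_parse_user_cmdline() below.
 */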

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initdata = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
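
/*
 * Example (editor's illustration): "spectre_v2=retpoline,generic" forces
 * the compiler-based retpoline thunks even on AMD parts, while
 * "spectre_v2=off" (or "nospectre_v2") disables the mitigation entirely;
 * the table above maps those strings to SPECTRE_V2_CMD_* values.
 */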

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = SPECTRE_V2_RETPOLINE_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = SPECTRE_V2_RETPOLINE_GENERIC;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);

	/* Enable STIBP if appropriate */
	arch_smt_update();
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing on CPUs which are affected only by
	 * MSBDS and not any other MDS variant. The other variants cannot
	 * be mitigated when SMT is enabled, so clearing the buffers on
	 * idle would be a window dressing exercise.
	 */
	if (!boot_cpu_has(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initdata = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
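
/*
 * Example (editor's illustration): "spec_store_bypass_disable=seccomp"
 * (or the "auto" default with CONFIG_SECCOMP) leaves SSB enabled but lets
 * tasks opt out via prctl(PR_SET_SPECULATION_CTRL) and forces it off for
 * seccomp-confined tasks, whereas "=on" disables Speculative Store Bypass
 * globally.
 */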

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);

enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
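
/*
 * Worked example (editor's sketch, assuming l1tf_pfn_limit() is
 * BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT)): with the cache bits fixed up
 * to 44 by override_cache_bits(), half_pa = 2^43 bytes (8 TiB), so the
 * e820 check above only trips on machines whose RAM actually reaches the
 * top half of that range; with an uncorrected 36-bit report it would be
 * 2^35 bytes (32 GiB), which is the false positive described in the
 * comment before override_cache_bits().
 */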

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       sched_smt_active() ? "mitigated" : "disabled");
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif