// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}
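
/*
 * Context note (illustrative, not part of the original file): check_bugs()
 * is called once from start_kernel() during early boot, before any user
 * space exists. The ordering above matters: spectre_v2_select_mitigation()
 * has to run before alternative_instructions() so that the retpoline
 * feature bits are set by the time the alternatives are patched in.
 */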

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
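
/*
 * Usage note (illustrative, not part of the original file): KVM does not
 * call x86_virt_spec_ctrl() directly on entry/exit. The inline wrappers
 * x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() in
 * <asm/spec-ctrl.h> pass setguest=true before VM-entry and setguest=false
 * after VM-exit, so the host value is always re-established on the way
 * out of the guest.
 */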

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
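
/*
 * Usage example (illustrative, not part of the original file): the parser
 * above handles the documented "mds=" kernel command line option, e.g.
 *
 *	mds=off			no MDS mitigation
 *	mds=full		clear CPU buffers on kernel exit (default)
 *	mds=full,nosmt		additionally disable SMT on affected CPUs
 *
 * Unrecognized values fall through and leave the default in place.
 */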

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
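
/*
 * Worked example (illustrative, not part of the original file):
 * match_option() requires an exact token match, not a prefix match. With
 * arg = "prctl,ibpb" and arglen = 10, match_option(arg, 10, "prctl") is
 * false because strlen("prctl") != 10, while
 * match_option(arg, 10, "prctl,ibpb") is true.
 */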

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initdata = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}
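
/*
 * Usage example (illustrative, not part of the original file): the table
 * and parser above back the documented "spectre_v2_user=" command line
 * option. For instance, booting with
 *
 *	spectre_v2_user=prctl,ibpb
 *
 * selects per-task STIBP via prctl() while making IBPB unconditional on
 * context switches between different mm's (switch_mm_always_ibpb below).
 */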

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initdata = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}
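
/*
 * Usage example (illustrative, not part of the original file): the parser
 * above implements the documented "spectre_v2=" command line option, e.g.
 *
 *	spectre_v2=off
 *	spectre_v2=retpoline,generic
 *	nospectre_v2		(equivalent to spectre_v2=off)
 *
 * and, via cpu_mitigations_off(), it also honors "mitigations=off".
 */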

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!\n");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = SPECTRE_V2_RETPOLINE_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = SPECTRE_V2_RETPOLINE_GENERIC;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing on CPUs which are affected only by
	 * MSBDS and not any other MDS variant. The other variants cannot
	 * be mitigated when SMT is enabled, so clearing the buffers on
	 * idle would be a window dressing exercise.
	 */
	if (!boot_cpu_has(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
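
/*
 * Context note (illustrative, not part of the original file): besides the
 * explicit call from check_bugs() above, the CPU hotplug core invokes
 * arch_smt_update() whenever SMT is switched on or off at runtime (e.g.
 * via /sys/devices/system/cpu/smt/control), which is what lets STIBP and
 * the MDS idle clearing track the current SMT state.
 */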

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initdata = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
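
/*
 * Usage example (illustrative, not part of the original file): the parser
 * above implements the documented "spec_store_bypass_disable=" command
 * line option, e.g.
 *
 *	spec_store_bypass_disable=seccomp
 *
 * which disables speculative store bypass for seccomp threads and for
 * tasks that request it via prctl(); "nospec_store_bypass_disable" and
 * "mitigations=off" both map to the "off" behaviour.
 */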

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
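
/*
 * Usage example (illustrative, not part of the original file): from user
 * space this is reached via prctl(2). A sandbox can, for instance, opt
 * itself out of speculative store bypass before dropping privileges:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_FORCE_DISABLE, 0, 0);
 *
 * After PR_SPEC_FORCE_DISABLE, a later PR_SPEC_ENABLE fails with -EPERM,
 * as enforced above.
 */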

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
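
/*
 * Context note (illustrative, not part of the original file): the seccomp
 * core calls arch_seccomp_spec_mitigate() when a task installs a filter,
 * unless the filter was loaded with SECCOMP_FILTER_FLAG_SPEC_ALLOW, so in
 * the seccomp modes every sandboxed task gets the force-disable treatment
 * automatically.
 */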

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}
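
/*
 * Usage example (illustrative, not part of the original file): user space
 * queries the current state with the read side of the same prctl, e.g.
 *
 *	int state = prctl(PR_GET_SPECULATION_CTRL,
 *			  PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *
 * The PR_SPEC_PRCTL bit in the return value indicates that the per-task
 * control implemented above is available.
 */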

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);

enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
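
/*
 * Worked example (illustrative, not part of the original file): with the
 * cache override above, an affected Nehalem box that reports 36 physical
 * address bits is treated as having 44, so the MAX_PA/2 cutoff computed
 * from l1tf_pfn_limit() moves from 2^35 bytes (32 GB) up to 2^43 bytes
 * (8 TB), avoiding the false-positive warning described in the
 * override_cache_bits() comment.
 */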

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
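
/*
 * Usage example (illustrative, not part of the original file): the parser
 * above implements the documented "l1tf=" command line option. For
 * instance, a host that must never run with SMT while L1TF is unmitigated
 * can boot with
 *
 *	l1tf=full,force
 *
 * which disables SMT and L1D flush runtime control, and cannot be undone
 * at runtime.
 */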

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}
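
/*
 * Usage note (illustrative, not part of the original file): the
 * cpu_show_*() handlers below back the sysfs files under
 * /sys/devices/system/cpu/vulnerabilities/, so the combined status can be
 * inspected from user space with e.g.
 *
 *	grep . /sys/devices/system/cpu/vulnerabilities/*
 */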

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif