// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *     - Rafael R. Reilova (moved everything from head.S),
 *       <rreilova@ececs.uc.edu>
 *     - Channing Corn (tests & fixes),
 *     - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

#include "cpu.h"

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized the SMT support information;
         * let the core code know.
         */
        cpu_smt_check_topology();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

        mds_select_mitigation();

        arch_smt_update();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}
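
/*
 * Editor's note on ordering in check_bugs(): spectre_v2_select_mitigation()
 * must run before alternative_instructions() so the X86_FEATURE_RETPOLINE*
 * bits it force-sets are visible when the alternatives are patched, and
 * arch_smt_update() is deliberately called only after all the mitigation
 * selections (spectre_v2, ssb, l1tf, mds) so the SMT-dependent controls
 * operate on the final mitigation state.
 */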

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                /* Conditional STIBP enabled? */
                if (static_branch_unlikely(&switch_to_cond_stibp))
                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
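
/*
 * Illustrative usage (a sketch, not code from this file): a hypervisor is
 * expected to bracket guest entry and exit with this helper, e.g.
 *
 *      x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
 *      ... enter and run the guest ...
 *      x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
 *
 * where the two values are the guest's shadowed SPEC_CTRL and
 * VIRT_SPEC_CTRL MSR contents as tracked by the hypervisor. The variable
 * names above are assumptions for illustration; the actual caller lives
 * in the KVM code, not in this file.
 */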

static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)     "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
        [MDS_MITIGATION_OFF]    = "Vulnerable",
        [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
        [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS)) {
                mds_mitigation = MDS_MITIGATION_OFF;
                return;
        }

        if (mds_mitigation == MDS_MITIGATION_FULL) {
                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                        mds_mitigation = MDS_MITIGATION_VMWERV;

                static_branch_enable(&mds_user_clear);

                if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
                        cpu_smt_disable(false);
        }

        pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                mds_mitigation = MDS_MITIGATION_OFF;
        else if (!strcmp(str, "full"))
                mds_mitigation = MDS_MITIGATION_FULL;
        else if (!strcmp(str, "full,nosmt")) {
                mds_mitigation = MDS_MITIGATION_FULL;
                mds_nosmt = true;
        }

        return 0;
}
early_param("mds", mds_cmdline);
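
/*
 * Values accepted on the kernel command line, as parsed above:
 *
 *      mds=off         - disable the mitigation
 *      mds=full        - clear CPU buffers (the default)
 *      mds=full,nosmt  - additionally disable SMT, unless the CPU is
 *                        affected only by MSBDS
 */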

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
        SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
        SPECTRE_V2_USER_CMD_NONE,
        SPECTRE_V2_USER_CMD_AUTO,
        SPECTRE_V2_USER_CMD_FORCE,
        SPECTRE_V2_USER_CMD_PRCTL,
        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
        SPECTRE_V2_USER_CMD_SECCOMP,
        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
        [SPECTRE_V2_USER_NONE]             = "User space: Vulnerable",
        [SPECTRE_V2_USER_STRICT]           = "User space: Mitigation: STIBP protection",
        [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
        [SPECTRE_V2_USER_PRCTL]            = "User space: Mitigation: STIBP via prctl",
        [SPECTRE_V2_USER_SECCOMP]          = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
} v2_user_options[] __initdata = {
        { "auto",         SPECTRE_V2_USER_CMD_AUTO,         false },
        { "off",          SPECTRE_V2_USER_CMD_NONE,         false },
        { "on",           SPECTRE_V2_USER_CMD_FORCE,        true  },
        { "prctl",        SPECTRE_V2_USER_CMD_PRCTL,        false },
        { "prctl,ibpb",   SPECTRE_V2_USER_CMD_PRCTL_IBPB,   false },
        { "seccomp",      SPECTRE_V2_USER_CMD_SECCOMP,      false },
        { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
};
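
/*
 * These option strings are matched against the spectre_v2_user= command
 * line parameter. For example, booting with "spectre_v2_user=seccomp,ibpb"
 * selects the seccomp mode for STIBP while making the IBPB issued on
 * context switch unconditional rather than conditional.
 */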

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
        char arg[20];
        int ret, i;

        switch (v2_cmd) {
        case SPECTRE_V2_CMD_NONE:
                return SPECTRE_V2_USER_CMD_NONE;
        case SPECTRE_V2_CMD_FORCE:
                return SPECTRE_V2_USER_CMD_FORCE;
        default:
                break;
        }

        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_USER_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
                if (match_option(arg, ret, v2_user_options[i].option)) {
                        spec_v2_user_print_cond(v2_user_options[i].option,
                                                v2_user_options[i].secure);
                        return v2_user_options[i].cmd;
                }
        }

        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
        return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
        bool smt_possible = IS_ENABLED(CONFIG_SMP);
        enum spectre_v2_user_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
                return;

        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
                smt_possible = false;

        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
        switch (cmd) {
        case SPECTRE_V2_USER_CMD_NONE:
                goto set_mode;
        case SPECTRE_V2_USER_CMD_FORCE:
                mode = SPECTRE_V2_USER_STRICT;
                break;
        case SPECTRE_V2_USER_CMD_PRCTL:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                mode = SPECTRE_V2_USER_PRCTL;
                break;
        case SPECTRE_V2_USER_CMD_AUTO:
        case SPECTRE_V2_USER_CMD_SECCOMP:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPECTRE_V2_USER_SECCOMP;
                else
                        mode = SPECTRE_V2_USER_PRCTL;
                break;
        }

        /*
         * At this point, an STIBP mode other than "off" has been set.
         * If STIBP support is not being forced, check if STIBP always-on
         * is preferred.
         */
        if (mode != SPECTRE_V2_USER_STRICT &&
            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
                mode = SPECTRE_V2_USER_STRICT_PREFERRED;

        /* Initialize Indirect Branch Prediction Barrier */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
                case SPECTRE_V2_USER_CMD_SECCOMP:
                        static_branch_enable(&switch_mm_cond_ibpb);
                        break;
                default:
                        break;
                }

                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
        }

        /* If enhanced IBRS is enabled, no STIBP is required */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        /*
         * If SMT is not possible or STIBP is not available, clear the
         * STIBP mode.
         */
        if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
                mode = SPECTRE_V2_USER_NONE;
set_mode:
        spectre_v2_user = mode;
        /* Only print the STIBP mode when SMT is possible */
        if (smt_possible)
                pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]               = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC]  = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]      = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]      = "Mitigation: Enhanced IBRS",
};

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] __initdata = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
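
/*
 * Example: booting with "spectre_v2=retpoline,generic" forces the
 * compiler-generated retpoline thunks even on AMD/Hygon CPUs, where
 * "auto" would otherwise pick the LFENCE-based AMD variant (see
 * retpoline_auto in spectre_v2_select_mitigation() below).
 */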

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
                pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        spec_v2_print_cond(mitigation_options[i].option,
                           mitigation_options[i].secure);
        return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Set up IBPB and STIBP depending on the general spectre V2 command */
        spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (sched_smt_active())
                mask |= SPEC_CTRL_STIBP;

        if (mask == x86_spec_ctrl_base)
                return;

        pr_info("Update user space SMT mitigation: STIBP %s\n",
                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
        x86_spec_ctrl_base = mask;
        on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
        if (sched_smt_active())
                static_branch_enable(&switch_to_cond_stibp);
        else
                static_branch_disable(&switch_to_cond_stibp);
}

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
         *
         * The other variants cannot be mitigated when SMT is enabled, so
         * clearing the buffers on idle just to prevent the Store Buffer
         * repartitioning leak would be a window dressing exercise.
         */
        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
                return;

        if (sched_smt_active())
                static_branch_enable(&mds_idle_clear);
        else
                static_branch_disable(&mds_idle_clear);
}

void arch_smt_update(void)
{
        /* Enhanced IBRS implies STIBP. No update required. */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return;

        mutex_lock(&spec_ctrl_mutex);

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                break;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                update_stibp_strict();
                break;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                update_indir_branch_cond();
                break;
        }

        switch (mds_mitigation) {
        case MDS_MITIGATION_FULL:
        case MDS_MITIGATION_VMWERV:
                update_mds_branch_idle();
                break;
        case MDS_MITIGATION_OFF:
                break;
        }

        mutex_unlock(&spec_ctrl_mutex);
}
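
/*
 * Note: arch_smt_update() runs in two situations. check_bugs() calls it
 * once at boot, after every mitigation selection has completed (the page
 * this blame was taken from documents exactly that ordering change), and
 * the CPU hotplug core calls it again whenever the SMT state changes, so
 * STIBP and the MDS idle clearing track SMT being enabled or disabled at
 * runtime.
 */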

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]    = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]   = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initdata = {
        { "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
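
/*
 * Example: "spec_store_bypass_disable=seccomp" leaves speculative store
 * bypass enabled by default but force-disables it for every task that
 * enters a seccomp filter, and for tasks that opt in via prctl(); see
 * arch_seccomp_spec_mitigate() and ssb_prctl_set() below.
 */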

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
        /* Force the update of the real TIF bits */
        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

        /*
         * Immediately update the speculation control MSRs for the current
         * task, but for a non-current task delay setting the CPU
         * mitigation until it is scheduled next.
         *
         * This can only happen for SECCOMP mitigation. For PRCTL it's
         * always the current task.
         */
        if (tsk == current)
                speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        switch (ctrl) {
        case PR_SPEC_ENABLE:
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return 0;
                /*
                 * Indirect branch speculation is always disabled in strict
                 * mode.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
                    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
                        return -EPERM;
                task_clear_spec_ib_disable(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
        case PR_SPEC_FORCE_DISABLE:
                /*
                 * Indirect branch speculation is always allowed when
                 * mitigation is force disabled.
                 */
                if (spectre_v2_user == SPECTRE_V2_USER_NONE)
                        return -EPERM;
                if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
                    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
                        return 0;
                task_set_spec_ib_disable(task);
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}
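
/*
 * Userspace reaches the functions above through prctl(2). An illustrative
 * caller (not part of this file):
 *
 *      #include <sys/prctl.h>
 *      #include <linux/prctl.h>
 *
 *      // Opt this task out of speculative store bypass:
 *      prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *            PR_SPEC_DISABLE, 0, 0);
 *
 * This only succeeds when ssb_mode is PRCTL or SECCOMP, matching the
 * -ENXIO check in ssb_prctl_set() above.
 */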

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
        if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

static int ib_prctl_get(struct task_struct *task)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return PR_SPEC_NOT_AFFECTED;

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (task_spec_ib_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ib_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        case SPECTRE_V2_USER_STRICT:
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return PR_SPEC_DISABLE;
        default:
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        case PR_SPEC_INDIRECT_BRANCH:
                return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
}
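
/*
 * The read side, illustratively:
 *
 *      int state = prctl(PR_GET_SPECULATION_CTRL,
 *                        PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *
 * returns a bitmask such as PR_SPEC_PRCTL | PR_SPEC_DISABLE, letting a
 * task query whether it can restrict (and has restricted) indirect
 * branch speculation for itself.
 */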

void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache, but CPUID can report a smaller number of physical address
 * bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G of RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible, since
 * the cache uses 44 bits internally. Use the cache bits instead of the
 * reported physical bits and adjust them on the affected machines to 44
 * if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
            e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                        half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);
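
/*
 * Summary of the l1tf= choices parsed above, weakest to strongest:
 * "off", "flush,nowarn", "flush" (the default), "flush,nosmt", "full",
 * "full,force". The nosmt/full variants also disable SMT via
 * cpu_smt_disable(false), and "full,force" (cpu_smt_disable(true))
 * additionally forbids re-enabling SMT at runtime.
 */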

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]         = "auto",
        [VMENTER_L1D_FLUSH_NEVER]        = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]         = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]       = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
        if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                               mds_strings[mds_mitigation]);
        }

        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                               sched_smt_active() ? "mitigated" : "disabled");
        }

        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        switch (spectre_v2_user) {
        case SPECTRE_V2_USER_NONE:
                return ", STIBP: disabled";
        case SPECTRE_V2_USER_STRICT:
                return ", STIBP: forced";
        case SPECTRE_V2_USER_STRICT_PREFERRED:
                return ", STIBP: always-on";
        case SPECTRE_V2_USER_PRCTL:
        case SPECTRE_V2_USER_SECCOMP:
                if (static_key_enabled(&switch_to_cond_stibp))
                        return ", STIBP: conditional";
        }
        return "";
}

static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                if (static_key_enabled(&switch_mm_always_ibpb))
                        return ", IBPB: always-on";
                if (static_key_enabled(&switch_mm_cond_ibpb))
                        return ", IBPB: conditional";
                return ", IBPB: disabled";
        }
        return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;

        case X86_BUG_MDS:
                return mds_show_state(buf);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}
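
/*
 * The cpu_show_*() wrappers below back the files in
 * /sys/devices/system/cpu/vulnerabilities/. Reading e.g. "spectre_v2"
 * there yields a line assembled by cpu_show_common() above, such as
 * "Mitigation: Full generic retpoline, IBPB: conditional, IBRS_FW,
 * STIBP: conditional, RSB filling" - the exact fields depend on the
 * mitigations selected at boot.
 */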

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif