mirror_ubuntu-bionic-kernel.git: arch/x86/kernel/cpu/bugs.c
x86/speculation/mds: Conditionally clear CPU buffers on idle entry
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/utsname.h>
13 #include <linux/cpu.h>
14 #include <linux/module.h>
15 #include <linux/nospec.h>
16 #include <linux/prctl.h>
17 #include <linux/sched/smt.h>
18
19 #include <asm/spec-ctrl.h>
20 #include <asm/cmdline.h>
21 #include <asm/bugs.h>
22 #include <asm/processor.h>
23 #include <asm/processor-flags.h>
24 #include <asm/fpu/internal.h>
25 #include <asm/msr.h>
26 #include <asm/vmx.h>
27 #include <asm/paravirt.h>
28 #include <asm/alternative.h>
29 #include <asm/pgtable.h>
30 #include <asm/set_memory.h>
31 #include <asm/intel-family.h>
32 #include <asm/e820/api.h>
33
34 static void __init spectre_v2_select_mitigation(void);
35 static void __init ssb_select_mitigation(void);
36 static void __init l1tf_select_mitigation(void);
37
38 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
39 u64 x86_spec_ctrl_base;
40 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
41 static DEFINE_MUTEX(spec_ctrl_mutex);
42
43 /*
44 * The vendor and possibly platform specific bits which can be modified in
45 * x86_spec_ctrl_base.
46 */
47 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
48
49 /*
50 * AMD specific MSR info for Speculative Store Bypass control.
51 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
52 */
53 u64 __ro_after_init x86_amd_ls_cfg_base;
54 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
55
56 /* Control conditional STIBP in switch_to() */
57 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
58 /* Control conditional IBPB in switch_mm() */
59 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
60 /* Control unconditional IBPB in switch_mm() */
61 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
62
63 /* Control MDS CPU buffer clear before returning to user space */
64 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
65 EXPORT_SYMBOL_GPL(mds_user_clear);
66 /* Control MDS CPU buffer clear before idling (halt, mwait) */
67 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
68 EXPORT_SYMBOL_GPL(mds_idle_clear);
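/*
 * For reference (not used in this file): these keys are expected to gate
 * the VERW based mds_clear_cpu_buffers() helper in <asm/nospec-branch.h>;
 * mds_user_clear on the return-to-user path and mds_idle_clear before
 * entering halt/mwait idle states, which is why both are exported rather
 * than kept static.
 */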
69
70 void __init check_bugs(void)
71 {
72 identify_boot_cpu();
73
74 /*
75 * identify_boot_cpu() initialized SMT support information; let the
76 * core code know.
77 */
78 cpu_smt_check_topology_early();
79
80 if (!IS_ENABLED(CONFIG_SMP)) {
81 pr_info("CPU: ");
82 print_cpu_info(&boot_cpu_data);
83 }
84
85 /*
86 * Read the SPEC_CTRL MSR to account for reserved bits which may
87 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
88 * init code as it is not enumerated and depends on the family.
89 */
90 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
91 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
92
93 /* Allow STIBP in MSR_SPEC_CTRL if supported */
94 if (boot_cpu_has(X86_FEATURE_STIBP))
95 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
96
97 /* Select the proper spectre mitigation before patching alternatives */
98 spectre_v2_select_mitigation();
99
100 /*
101 * Select proper mitigation for any exposure to the Speculative Store
102 * Bypass vulnerability.
103 */
104 ssb_select_mitigation();
105
106 l1tf_select_mitigation();
107
108 #ifdef CONFIG_X86_32
109 /*
110 * Check whether we are able to run this kernel safely on SMP.
111 *
112 * - i386 is no longer supported.
113 * - In order to run on anything without a TSC, we need to be
114 * compiled for an i486.
115 */
116 if (boot_cpu_data.x86 < 4)
117 panic("Kernel requires i486+ for 'invlpg' and other features");
118
119 init_utsname()->machine[1] =
120 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
121 alternative_instructions();
122
123 fpu__init_check_bugs();
124 #else /* CONFIG_X86_64 */
125 alternative_instructions();
126
127 /*
128 * Make sure the first 2MB area is not mapped by huge pages.
129 * There are typically fixed size MTRRs in there and overlapping
130 * MTRRs into large pages causes slowdowns.
131 *
132 * Right now we don't do that with gbpages because there seems
133 * very little benefit for that case.
134 */
135 if (!direct_gbpages)
136 set_memory_4k((unsigned long)__va(0), 1);
137 #endif
138 }
139
140 void
141 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
142 {
143 u64 msrval, guestval, hostval = x86_spec_ctrl_base;
144 struct thread_info *ti = current_thread_info();
145
146 /* Is MSR_SPEC_CTRL implemented? */
147 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
148 /*
149 * Restrict guest_spec_ctrl to supported values. Clear the
150 * modifiable bits in the host base value and OR in the
151 * modifiable bits from the guest value.
152 */
153 guestval = hostval & ~x86_spec_ctrl_mask;
154 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
155
156 /* SSBD controlled in MSR_SPEC_CTRL */
157 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
158 static_cpu_has(X86_FEATURE_AMD_SSBD))
159 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
160
161 /* Conditional STIBP enabled? */
162 if (static_branch_unlikely(&switch_to_cond_stibp))
163 hostval |= stibp_tif_to_spec_ctrl(ti->flags);
164
165 if (hostval != guestval) {
166 msrval = setguest ? guestval : hostval;
167 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
168 }
169 }
170
171 /*
172 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
173 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
174 */
175 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
176 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
177 return;
178
179 /*
180 * If the host has SSBD mitigation enabled, force it in the host's
181 * virtual MSR value. If it's not permanently enabled, evaluate
182 * current's TIF_SSBD thread flag.
183 */
184 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
185 hostval = SPEC_CTRL_SSBD;
186 else
187 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
188
189 /* Sanitize the guest value */
190 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
191
192 if (hostval != guestval) {
193 unsigned long tif;
194
195 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
196 ssbd_spec_ctrl_to_tif(hostval);
197
198 speculation_ctrl_update(tif);
199 }
200 }
201 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
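/*
 * Usage sketch, for illustration only: KVM is not expected to call this
 * function directly but through the inline wrappers in <asm/spec-ctrl.h>,
 * roughly:
 *
 *	x86_spec_ctrl_set_guest(guest_spec_ctrl, guest_virt_spec_ctrl);
 *	... run the guest ...
 *	x86_spec_ctrl_restore_host(guest_spec_ctrl, guest_virt_spec_ctrl);
 *
 * which resolve to x86_virt_spec_ctrl(..., true) before VM entry and
 * x86_virt_spec_ctrl(..., false) after VM exit.
 */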
202
203 static void x86_amd_ssb_disable(void)
204 {
205 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
206
207 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
208 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
209 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
210 wrmsrl(MSR_AMD64_LS_CFG, msrval);
211 }
212
213 #undef pr_fmt
214 #define pr_fmt(fmt) "Spectre V2 : " fmt
215
216 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
217 SPECTRE_V2_NONE;
218
219 static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
220 SPECTRE_V2_USER_NONE;
221
222 #ifdef CONFIG_RETPOLINE
223 static bool spectre_v2_bad_module;
224
225 bool retpoline_module_ok(bool has_retpoline)
226 {
227 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
228 return true;
229
230 pr_err("System may be vulnerable to spectre v2\n");
231 spectre_v2_bad_module = true;
232 return false;
233 }
234
235 static inline const char *spectre_v2_module_string(void)
236 {
237 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
238 }
239 #else
240 static inline const char *spectre_v2_module_string(void) { return ""; }
241 #endif
242
243 static inline bool match_option(const char *arg, int arglen, const char *opt)
244 {
245 int len = strlen(opt);
246
247 return len == arglen && !strncmp(arg, opt, len);
248 }
249
250 /* The kernel command line selection for spectre v2 */
251 enum spectre_v2_mitigation_cmd {
252 SPECTRE_V2_CMD_NONE,
253 SPECTRE_V2_CMD_AUTO,
254 SPECTRE_V2_CMD_FORCE,
255 SPECTRE_V2_CMD_RETPOLINE,
256 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
257 SPECTRE_V2_CMD_RETPOLINE_AMD,
258 };
259
260 enum spectre_v2_user_cmd {
261 SPECTRE_V2_USER_CMD_NONE,
262 SPECTRE_V2_USER_CMD_AUTO,
263 SPECTRE_V2_USER_CMD_FORCE,
264 SPECTRE_V2_USER_CMD_PRCTL,
265 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
266 SPECTRE_V2_USER_CMD_SECCOMP,
267 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
268 };
269
270 static const char * const spectre_v2_user_strings[] = {
271 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
272 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
273 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
274 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
275 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
276 };
277
278 static const struct {
279 const char *option;
280 enum spectre_v2_user_cmd cmd;
281 bool secure;
282 } v2_user_options[] __initdata = {
283 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
284 { "off", SPECTRE_V2_USER_CMD_NONE, false },
285 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
286 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
287 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
288 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
289 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
290 };
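/*
 * Example, matching the table above: booting with
 * "spectre_v2_user=seccomp,ibpb" selects SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
 * i.e. conditional STIBP via prctl()/seccomp plus unconditional IBPB on
 * context switch, while "spectre_v2_user=off" disables the user space
 * mitigation entirely.
 */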
291
292 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
293 {
294 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
295 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
296 }
297
298 static enum spectre_v2_user_cmd __init
299 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
300 {
301 char arg[20];
302 int ret, i;
303
304 switch (v2_cmd) {
305 case SPECTRE_V2_CMD_NONE:
306 return SPECTRE_V2_USER_CMD_NONE;
307 case SPECTRE_V2_CMD_FORCE:
308 return SPECTRE_V2_USER_CMD_FORCE;
309 default:
310 break;
311 }
312
313 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
314 arg, sizeof(arg));
315 if (ret < 0)
316 return SPECTRE_V2_USER_CMD_AUTO;
317
318 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
319 if (match_option(arg, ret, v2_user_options[i].option)) {
320 spec_v2_user_print_cond(v2_user_options[i].option,
321 v2_user_options[i].secure);
322 return v2_user_options[i].cmd;
323 }
324 }
325
326 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
327 return SPECTRE_V2_USER_CMD_AUTO;
328 }
329
330 static void __init
331 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
332 {
333 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
334 bool smt_possible = IS_ENABLED(CONFIG_SMP);
335 enum spectre_v2_user_cmd cmd;
336
337 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
338 return;
339
340 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
341 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
342 smt_possible = false;
343
344 cmd = spectre_v2_parse_user_cmdline(v2_cmd);
345 switch (cmd) {
346 case SPECTRE_V2_USER_CMD_NONE:
347 goto set_mode;
348 case SPECTRE_V2_USER_CMD_FORCE:
349 mode = SPECTRE_V2_USER_STRICT;
350 break;
351 case SPECTRE_V2_USER_CMD_PRCTL:
352 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
353 mode = SPECTRE_V2_USER_PRCTL;
354 break;
355 case SPECTRE_V2_USER_CMD_AUTO:
356 case SPECTRE_V2_USER_CMD_SECCOMP:
357 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
358 if (IS_ENABLED(CONFIG_SECCOMP))
359 mode = SPECTRE_V2_USER_SECCOMP;
360 else
361 mode = SPECTRE_V2_USER_PRCTL;
362 break;
363 }
364
365 /*
366 * At this point, an STIBP mode other than "off" has been set.
367 * If STIBP support is not being forced, check if STIBP always-on
368 * is preferred.
369 */
370 if (mode != SPECTRE_V2_USER_STRICT &&
371 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
372 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
373
374 /* Initialize Indirect Branch Prediction Barrier */
375 if (boot_cpu_has(X86_FEATURE_IBPB)) {
376 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
377
378 switch (cmd) {
379 case SPECTRE_V2_USER_CMD_FORCE:
380 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
381 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
382 static_branch_enable(&switch_mm_always_ibpb);
383 break;
384 case SPECTRE_V2_USER_CMD_PRCTL:
385 case SPECTRE_V2_USER_CMD_AUTO:
386 case SPECTRE_V2_USER_CMD_SECCOMP:
387 static_branch_enable(&switch_mm_cond_ibpb);
388 break;
389 default:
390 break;
391 }
392
393 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
394 static_key_enabled(&switch_mm_always_ibpb) ?
395 "always-on" : "conditional");
396 }
397
398 /* If enhanced IBRS is enabled no STIBP required */
399 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
400 return;
401
402 /*
403 * If SMT is not possible or STIBP is not available clear the STIBP
404 * mode.
405 */
406 if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
407 mode = SPECTRE_V2_USER_NONE;
408 set_mode:
409 spectre_v2_user = mode;
410 /* Only print the STIBP mode when SMT possible */
411 if (smt_possible)
412 pr_info("%s\n", spectre_v2_user_strings[mode]);
413 }
414
415 static const char * const spectre_v2_strings[] = {
416 [SPECTRE_V2_NONE] = "Vulnerable",
417 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
418 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
419 [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
420 };
421
422 static const struct {
423 const char *option;
424 enum spectre_v2_mitigation_cmd cmd;
425 bool secure;
426 } mitigation_options[] __initdata = {
427 { "off", SPECTRE_V2_CMD_NONE, false },
428 { "on", SPECTRE_V2_CMD_FORCE, true },
429 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
430 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
431 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
432 { "auto", SPECTRE_V2_CMD_AUTO, false },
433 };
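/*
 * Example, matching the table above: "spectre_v2=retpoline,generic" forces
 * the compiler based retpoline thunks even on AMD parts, while
 * "spectre_v2=auto" (the default) lets spectre_v2_select_mitigation() pick
 * enhanced IBRS or a retpoline flavour based on the CPU features.
 */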
434
435 static void __init spec_v2_print_cond(const char *reason, bool secure)
436 {
437 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
438 pr_info("%s selected on command line.\n", reason);
439 }
440
441 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
442 {
443 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
444 char arg[20];
445 int ret, i;
446
447 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
448 return SPECTRE_V2_CMD_NONE;
449
450 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
451 if (ret < 0)
452 return SPECTRE_V2_CMD_AUTO;
453
454 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
455 if (!match_option(arg, ret, mitigation_options[i].option))
456 continue;
457 cmd = mitigation_options[i].cmd;
458 break;
459 }
460
461 if (i >= ARRAY_SIZE(mitigation_options)) {
462 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
463 return SPECTRE_V2_CMD_AUTO;
464 }
465
466 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
467 cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
468 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
469 !IS_ENABLED(CONFIG_RETPOLINE)) {
470 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
471 return SPECTRE_V2_CMD_AUTO;
472 }
473
474 if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
475 boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
476 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
477 return SPECTRE_V2_CMD_AUTO;
478 }
479
480 spec_v2_print_cond(mitigation_options[i].option,
481 mitigation_options[i].secure);
482 return cmd;
483 }
484
485 static void __init spectre_v2_select_mitigation(void)
486 {
487 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
488 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
489
490 /*
491 * If the CPU is not affected and the command line mode is NONE or AUTO
492 * then nothing to do.
493 */
494 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
495 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
496 return;
497
498 switch (cmd) {
499 case SPECTRE_V2_CMD_NONE:
500 return;
501
502 case SPECTRE_V2_CMD_FORCE:
503 case SPECTRE_V2_CMD_AUTO:
504 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
505 mode = SPECTRE_V2_IBRS_ENHANCED;
506 /* Force it so VMEXIT will restore correctly */
507 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
508 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
509 goto specv2_set_mode;
510 }
511 if (IS_ENABLED(CONFIG_RETPOLINE))
512 goto retpoline_auto;
513 break;
514 case SPECTRE_V2_CMD_RETPOLINE_AMD:
515 if (IS_ENABLED(CONFIG_RETPOLINE))
516 goto retpoline_amd;
517 break;
518 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
519 if (IS_ENABLED(CONFIG_RETPOLINE))
520 goto retpoline_generic;
521 break;
522 case SPECTRE_V2_CMD_RETPOLINE:
523 if (IS_ENABLED(CONFIG_RETPOLINE))
524 goto retpoline_auto;
525 break;
526 }
527 pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
528 return;
529
530 retpoline_auto:
531 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
532 retpoline_amd:
533 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
534 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
535 goto retpoline_generic;
536 }
537 mode = SPECTRE_V2_RETPOLINE_AMD;
538 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
539 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
540 } else {
541 retpoline_generic:
542 mode = SPECTRE_V2_RETPOLINE_GENERIC;
543 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
544 }
545
546 specv2_set_mode:
547 spectre_v2_enabled = mode;
548 pr_info("%s\n", spectre_v2_strings[mode]);
549
550 /*
551 * If spectre v2 protection has been enabled, unconditionally fill
552 * RSB during a context switch; this protects against two independent
553 * issues:
554 *
555 * - RSB underflow (and switch to BTB) on Skylake+
556 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
557 */
558 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
559 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
560
561 /*
562 * Retpoline protects the kernel's own indirect branches, but it does
563 * not cover firmware. Enhanced IBRS protects firmware too, so enable
564 * restricted speculation around firmware calls only when Enhanced IBRS
565 * isn't supported.
566 *
567 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
568 * the user might select retpoline on the kernel command line and, if
569 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
570 * enable IBRS around firmware calls.
571 */
572 if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
573 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
574 pr_info("Enabling Restricted Speculation for firmware calls\n");
575 }
576
577 /* Set up IBPB and STIBP depending on the general spectre V2 command */
578 spectre_v2_user_select_mitigation(cmd);
579
580 /* Enable STIBP if appropriate */
581 arch_smt_update();
582 }
583
584 static void update_stibp_msr(void * __unused)
585 {
586 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
587 }
588
589 /* Update x86_spec_ctrl_base in case SMT state changed. */
590 static void update_stibp_strict(void)
591 {
592 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
593
594 if (sched_smt_active())
595 mask |= SPEC_CTRL_STIBP;
596
597 if (mask == x86_spec_ctrl_base)
598 return;
599
600 pr_info("Update user space SMT mitigation: STIBP %s\n",
601 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
602 x86_spec_ctrl_base = mask;
603 on_each_cpu(update_stibp_msr, NULL, 1);
604 }
605
606 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
607 static void update_indir_branch_cond(void)
608 {
609 if (sched_smt_active())
610 static_branch_enable(&switch_to_cond_stibp);
611 else
612 static_branch_disable(&switch_to_cond_stibp);
613 }
614
615 void arch_smt_update(void)
616 {
617 /* Enhanced IBRS implies STIBP. No update required. */
618 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
619 return;
620
621 mutex_lock(&spec_ctrl_mutex);
622
623 switch (spectre_v2_user) {
624 case SPECTRE_V2_USER_NONE:
625 break;
626 case SPECTRE_V2_USER_STRICT:
627 case SPECTRE_V2_USER_STRICT_PREFERRED:
628 update_stibp_strict();
629 break;
630 case SPECTRE_V2_USER_PRCTL:
631 case SPECTRE_V2_USER_SECCOMP:
632 update_indir_branch_cond();
633 break;
634 }
635
636 mutex_unlock(&spec_ctrl_mutex);
637 }
638
639 #undef pr_fmt
640 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
641
642 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
643
644 /* The kernel command line selection */
645 enum ssb_mitigation_cmd {
646 SPEC_STORE_BYPASS_CMD_NONE,
647 SPEC_STORE_BYPASS_CMD_AUTO,
648 SPEC_STORE_BYPASS_CMD_ON,
649 SPEC_STORE_BYPASS_CMD_PRCTL,
650 SPEC_STORE_BYPASS_CMD_SECCOMP,
651 };
652
653 static const char * const ssb_strings[] = {
654 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
655 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
656 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
657 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
658 };
659
660 static const struct {
661 const char *option;
662 enum ssb_mitigation_cmd cmd;
663 } ssb_mitigation_options[] __initdata = {
664 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
665 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
666 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
667 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
668 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
669 };
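/*
 * Example: "spec_store_bypass_disable=seccomp", which is also the
 * effective default on affected CPUs when CONFIG_SECCOMP is enabled,
 * leaves Speculative Store Bypass on globally but force-disables it for
 * seccomp tasks and for tasks that request it via prctl().
 */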
670
671 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
672 {
673 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
674 char arg[20];
675 int ret, i;
676
677 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
678 return SPEC_STORE_BYPASS_CMD_NONE;
679 } else {
680 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
681 arg, sizeof(arg));
682 if (ret < 0)
683 return SPEC_STORE_BYPASS_CMD_AUTO;
684
685 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
686 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
687 continue;
688
689 cmd = ssb_mitigation_options[i].cmd;
690 break;
691 }
692
693 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
694 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
695 return SPEC_STORE_BYPASS_CMD_AUTO;
696 }
697 }
698
699 return cmd;
700 }
701
702 static enum ssb_mitigation __init __ssb_select_mitigation(void)
703 {
704 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
705 enum ssb_mitigation_cmd cmd;
706
707 if (!boot_cpu_has(X86_FEATURE_SSBD))
708 return mode;
709
710 cmd = ssb_parse_cmdline();
711 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
712 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
713 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
714 return mode;
715
716 switch (cmd) {
717 case SPEC_STORE_BYPASS_CMD_AUTO:
718 case SPEC_STORE_BYPASS_CMD_SECCOMP:
719 /*
720 * Choose prctl+seccomp as the default mode if seccomp is
721 * enabled.
722 */
723 if (IS_ENABLED(CONFIG_SECCOMP))
724 mode = SPEC_STORE_BYPASS_SECCOMP;
725 else
726 mode = SPEC_STORE_BYPASS_PRCTL;
727 break;
728 case SPEC_STORE_BYPASS_CMD_ON:
729 mode = SPEC_STORE_BYPASS_DISABLE;
730 break;
731 case SPEC_STORE_BYPASS_CMD_PRCTL:
732 mode = SPEC_STORE_BYPASS_PRCTL;
733 break;
734 case SPEC_STORE_BYPASS_CMD_NONE:
735 break;
736 }
737
738 /*
739 * We have three CPU feature flags that are in play here:
740 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
741 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
742 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
743 */
744 if (mode == SPEC_STORE_BYPASS_DISABLE) {
745 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
746 /*
747 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
748 * use a completely different MSR and bit dependent on family.
749 */
750 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
751 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
752 x86_amd_ssb_disable();
753 } else {
754 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
755 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
756 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
757 }
758 }
759
760 return mode;
761 }
762
763 static void ssb_select_mitigation(void)
764 {
765 ssb_mode = __ssb_select_mitigation();
766
767 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
768 pr_info("%s\n", ssb_strings[ssb_mode]);
769 }
770
771 #undef pr_fmt
772 #define pr_fmt(fmt) "Speculation prctl: " fmt
773
774 static void task_update_spec_tif(struct task_struct *tsk)
775 {
776 /* Force the update of the real TIF bits */
777 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
778
779 /*
780 * Immediately update the speculation control MSRs for the current
781 * task, but for a non-current task delay setting the CPU
782 * mitigation until it is scheduled next.
783 *
784 * This can only happen for SECCOMP mitigation. For PRCTL it's
785 * always the current task.
786 */
787 if (tsk == current)
788 speculation_ctrl_update_current();
789 }
790
791 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
792 {
793 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
794 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
795 return -ENXIO;
796
797 switch (ctrl) {
798 case PR_SPEC_ENABLE:
799 /* If speculation is force disabled, enable is not allowed */
800 if (task_spec_ssb_force_disable(task))
801 return -EPERM;
802 task_clear_spec_ssb_disable(task);
803 task_update_spec_tif(task);
804 break;
805 case PR_SPEC_DISABLE:
806 task_set_spec_ssb_disable(task);
807 task_update_spec_tif(task);
808 break;
809 case PR_SPEC_FORCE_DISABLE:
810 task_set_spec_ssb_disable(task);
811 task_set_spec_ssb_force_disable(task);
812 task_update_spec_tif(task);
813 break;
814 default:
815 return -ERANGE;
816 }
817 return 0;
818 }
819
820 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
821 {
822 switch (ctrl) {
823 case PR_SPEC_ENABLE:
824 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
825 return 0;
826 /*
827 * Indirect branch speculation is always disabled in strict
828 * mode.
829 */
830 if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
831 spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
832 return -EPERM;
833 task_clear_spec_ib_disable(task);
834 task_update_spec_tif(task);
835 break;
836 case PR_SPEC_DISABLE:
837 case PR_SPEC_FORCE_DISABLE:
838 /*
839 * Indirect branch speculation is always allowed when
840 * mitigation is force disabled.
841 */
842 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
843 return -EPERM;
844 if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
845 spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
846 return 0;
847 task_set_spec_ib_disable(task);
848 if (ctrl == PR_SPEC_FORCE_DISABLE)
849 task_set_spec_ib_force_disable(task);
850 task_update_spec_tif(task);
851 break;
852 default:
853 return -ERANGE;
854 }
855 return 0;
856 }
857
858 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
859 unsigned long ctrl)
860 {
861 switch (which) {
862 case PR_SPEC_STORE_BYPASS:
863 return ssb_prctl_set(task, ctrl);
864 case PR_SPEC_INDIRECT_BRANCH:
865 return ib_prctl_set(task, ctrl);
866 default:
867 return -ENODEV;
868 }
869 }
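/*
 * User space reaches the handler above (and arch_prctl_spec_ctrl_get()
 * below) through the generic prctl() speculation interface. A minimal
 * sketch of a caller, assuming <sys/prctl.h> exposes the PR_* constants:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	long ibs = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *			 0, 0, 0);
 *
 * The value returned by the GET side is a bitmask such as
 * PR_SPEC_PRCTL | PR_SPEC_DISABLE, assembled by ssb_prctl_get() and
 * ib_prctl_get() below.
 */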
870
871 #ifdef CONFIG_SECCOMP
872 void arch_seccomp_spec_mitigate(struct task_struct *task)
873 {
874 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
875 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
876 if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
877 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
878 }
879 #endif
880
881 static int ssb_prctl_get(struct task_struct *task)
882 {
883 switch (ssb_mode) {
884 case SPEC_STORE_BYPASS_DISABLE:
885 return PR_SPEC_DISABLE;
886 case SPEC_STORE_BYPASS_SECCOMP:
887 case SPEC_STORE_BYPASS_PRCTL:
888 if (task_spec_ssb_force_disable(task))
889 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
890 if (task_spec_ssb_disable(task))
891 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
892 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
893 default:
894 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
895 return PR_SPEC_ENABLE;
896 return PR_SPEC_NOT_AFFECTED;
897 }
898 }
899
900 static int ib_prctl_get(struct task_struct *task)
901 {
902 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
903 return PR_SPEC_NOT_AFFECTED;
904
905 switch (spectre_v2_user) {
906 case SPECTRE_V2_USER_NONE:
907 return PR_SPEC_ENABLE;
908 case SPECTRE_V2_USER_PRCTL:
909 case SPECTRE_V2_USER_SECCOMP:
910 if (task_spec_ib_force_disable(task))
911 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
912 if (task_spec_ib_disable(task))
913 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
914 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
915 case SPECTRE_V2_USER_STRICT:
916 case SPECTRE_V2_USER_STRICT_PREFERRED:
917 return PR_SPEC_DISABLE;
918 default:
919 return PR_SPEC_NOT_AFFECTED;
920 }
921 }
922
923 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
924 {
925 switch (which) {
926 case PR_SPEC_STORE_BYPASS:
927 return ssb_prctl_get(task);
928 case PR_SPEC_INDIRECT_BRANCH:
929 return ib_prctl_get(task);
930 default:
931 return -ENODEV;
932 }
933 }
934
935 void x86_spec_ctrl_setup_ap(void)
936 {
937 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
938 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
939
940 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
941 x86_amd_ssb_disable();
942 }
943
944 #undef pr_fmt
945 #define pr_fmt(fmt) "L1TF: " fmt
946
947 /* Default mitigation for L1TF-affected CPUs */
948 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
949 #if IS_ENABLED(CONFIG_KVM_INTEL)
950 EXPORT_SYMBOL_GPL(l1tf_mitigation);
951
952 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
953 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
954 #endif
955
956 /*
957 * These CPUs all support a 44-bit physical address space internally in
958 * the cache, but CPUID can report a smaller number of physical address bits.
959 *
960 * The L1TF mitigation uses the topmost address bit for the inversion of
961 * non-present PTEs. When the installed memory reaches into the topmost
962 * address bit due to memory holes, which has been observed on machines
963 * reporting 36 physical address bits with 32GB of RAM installed, the
964 * mitigation range check in l1tf_select_mitigation() triggers. This is a
965 * false positive because the mitigation is still possible: the cache
966 * uses 44 bits internally. Use the cache bits instead of the reported
967 * physical bits and adjust them on the affected machines to 44 if the
968 * reported value is less than 44.
969 */
970 static void override_cache_bits(struct cpuinfo_x86 *c)
971 {
972 if (c->x86 != 6)
973 return;
974
975 switch (c->x86_model) {
976 case INTEL_FAM6_NEHALEM:
977 case INTEL_FAM6_WESTMERE:
978 case INTEL_FAM6_SANDYBRIDGE:
979 case INTEL_FAM6_IVYBRIDGE:
980 case INTEL_FAM6_HASWELL_CORE:
981 case INTEL_FAM6_HASWELL_ULT:
982 case INTEL_FAM6_HASWELL_GT3E:
983 case INTEL_FAM6_BROADWELL_CORE:
984 case INTEL_FAM6_BROADWELL_GT3E:
985 case INTEL_FAM6_SKYLAKE_MOBILE:
986 case INTEL_FAM6_SKYLAKE_DESKTOP:
987 case INTEL_FAM6_KABYLAKE_MOBILE:
988 case INTEL_FAM6_KABYLAKE_DESKTOP:
989 if (c->x86_cache_bits < 44)
990 c->x86_cache_bits = 44;
991 break;
992 }
993 }
994
995 static void __init l1tf_select_mitigation(void)
996 {
997 u64 half_pa;
998
999 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1000 return;
1001
1002 override_cache_bits(&boot_cpu_data);
1003
1004 switch (l1tf_mitigation) {
1005 case L1TF_MITIGATION_OFF:
1006 case L1TF_MITIGATION_FLUSH_NOWARN:
1007 case L1TF_MITIGATION_FLUSH:
1008 break;
1009 case L1TF_MITIGATION_FLUSH_NOSMT:
1010 case L1TF_MITIGATION_FULL:
1011 cpu_smt_disable(false);
1012 break;
1013 case L1TF_MITIGATION_FULL_FORCE:
1014 cpu_smt_disable(true);
1015 break;
1016 }
1017
1018 #if CONFIG_PGTABLE_LEVELS == 2
1019 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1020 return;
1021 #endif
1022
1023 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1024 if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1025 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1026 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1027 half_pa);
1028 pr_info("However, doing so will make a part of your RAM unusable.\n");
1029 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
1030 return;
1031 }
1032
1033 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1034 }
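/*
 * Worked example for the half_pa check above, assuming l1tf_pfn_limit()
 * is BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT) as in this tree: with the
 * adjusted 44 cache bits, half_pa is 1ULL << 43 (8 TiB), so the warning
 * only fires when RAM is actually mapped above that point. A CPU left at
 * 36 reported bits would have tripped the check at 32 GiB, which is the
 * false positive override_cache_bits() exists to avoid.
 */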
1035
1036 static int __init l1tf_cmdline(char *str)
1037 {
1038 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1039 return 0;
1040
1041 if (!str)
1042 return -EINVAL;
1043
1044 if (!strcmp(str, "off"))
1045 l1tf_mitigation = L1TF_MITIGATION_OFF;
1046 else if (!strcmp(str, "flush,nowarn"))
1047 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1048 else if (!strcmp(str, "flush"))
1049 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1050 else if (!strcmp(str, "flush,nosmt"))
1051 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1052 else if (!strcmp(str, "full"))
1053 l1tf_mitigation = L1TF_MITIGATION_FULL;
1054 else if (!strcmp(str, "full,force"))
1055 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1056
1057 return 0;
1058 }
1059 early_param("l1tf", l1tf_cmdline);
1060
1061 #undef pr_fmt
1062
1063 #ifdef CONFIG_SYSFS
1064
1065 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1066
1067 #if IS_ENABLED(CONFIG_KVM_INTEL)
1068 static const char * const l1tf_vmx_states[] = {
1069 [VMENTER_L1D_FLUSH_AUTO] = "auto",
1070 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
1071 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
1072 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
1073 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
1074 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
1075 };
1076
1077 static ssize_t l1tf_show_state(char *buf)
1078 {
1079 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1080 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1081
1082 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
1083 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
1084 sched_smt_active())) {
1085 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
1086 l1tf_vmx_states[l1tf_vmx_mitigation]);
1087 }
1088
1089 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
1090 l1tf_vmx_states[l1tf_vmx_mitigation],
1091 sched_smt_active() ? "vulnerable" : "disabled");
1092 }
1093 #else
1094 static ssize_t l1tf_show_state(char *buf)
1095 {
1096 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1097 }
1098 #endif
1099
1100 static char *stibp_state(void)
1101 {
1102 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1103 return "";
1104
1105 switch (spectre_v2_user) {
1106 case SPECTRE_V2_USER_NONE:
1107 return ", STIBP: disabled";
1108 case SPECTRE_V2_USER_STRICT:
1109 return ", STIBP: forced";
1110 case SPECTRE_V2_USER_STRICT_PREFERRED:
1111 return ", STIBP: always-on";
1112 case SPECTRE_V2_USER_PRCTL:
1113 case SPECTRE_V2_USER_SECCOMP:
1114 if (static_key_enabled(&switch_to_cond_stibp))
1115 return ", STIBP: conditional";
1116 }
1117 return "";
1118 }
1119
1120 static char *ibpb_state(void)
1121 {
1122 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1123 if (static_key_enabled(&switch_mm_always_ibpb))
1124 return ", IBPB: always-on";
1125 if (static_key_enabled(&switch_mm_cond_ibpb))
1126 return ", IBPB: conditional";
1127 return ", IBPB: disabled";
1128 }
1129 return "";
1130 }
1131
1132 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1133 char *buf, unsigned int bug)
1134 {
1135 if (!boot_cpu_has_bug(bug))
1136 return sprintf(buf, "Not affected\n");
1137
1138 switch (bug) {
1139 case X86_BUG_CPU_MELTDOWN:
1140 if (boot_cpu_has(X86_FEATURE_PTI))
1141 return sprintf(buf, "Mitigation: PTI\n");
1142
1143 break;
1144
1145 case X86_BUG_SPECTRE_V1:
1146 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1147
1148 case X86_BUG_SPECTRE_V2:
1149 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1150 ibpb_state(),
1151 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1152 stibp_state(),
1153 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1154 spectre_v2_module_string());
1155
1156 case X86_BUG_SPEC_STORE_BYPASS:
1157 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1158
1159 case X86_BUG_L1TF:
1160 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
1161 return l1tf_show_state(buf);
1162 break;
1163 default:
1164 break;
1165 }
1166
1167 return sprintf(buf, "Vulnerable\n");
1168 }
1169
1170 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1171 {
1172 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1173 }
1174
1175 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1176 {
1177 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1178 }
1179
1180 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1181 {
1182 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1183 }
1184
1185 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1186 {
1187 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1188 }
1189
1190 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1191 {
1192 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1193 }
1194 #endif