x86/bugs: Optimize SPEC_CTRL MSR writes
arch/x86/kernel/cpu/bugs.c (mirror_ubuntu-jammy-kernel.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/utsname.h>
13 #include <linux/cpu.h>
14 #include <linux/module.h>
15 #include <linux/nospec.h>
16 #include <linux/prctl.h>
17 #include <linux/sched/smt.h>
18 #include <linux/pgtable.h>
19 #include <linux/bpf.h>
20
21 #include <asm/spec-ctrl.h>
22 #include <asm/cmdline.h>
23 #include <asm/bugs.h>
24 #include <asm/processor.h>
25 #include <asm/processor-flags.h>
26 #include <asm/fpu/api.h>
27 #include <asm/msr.h>
28 #include <asm/vmx.h>
29 #include <asm/paravirt.h>
30 #include <asm/alternative.h>
31 #include <asm/set_memory.h>
32 #include <asm/intel-family.h>
33 #include <asm/e820/api.h>
34 #include <asm/hypervisor.h>
35 #include <asm/tlbflush.h>
36
37 #include "cpu.h"
38
39 static void __init spectre_v1_select_mitigation(void);
40 static void __init retbleed_select_mitigation(void);
41 static void __init spectre_v2_select_mitigation(void);
42 static void __init ssb_select_mitigation(void);
43 static void __init l1tf_select_mitigation(void);
44 static void __init mds_select_mitigation(void);
45 static void __init md_clear_update_mitigation(void);
46 static void __init md_clear_select_mitigation(void);
47 static void __init taa_select_mitigation(void);
48 static void __init mmio_select_mitigation(void);
49 static void __init srbds_select_mitigation(void);
50 static void __init l1d_flush_select_mitigation(void);
51
52 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
53 u64 x86_spec_ctrl_base;
54 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
55
56 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
57 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
58 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
59
60 static DEFINE_MUTEX(spec_ctrl_mutex);
61
62 /*
63 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
64 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
65 */
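/*
 * The per-CPU cache above lets this helper skip the WRMSR entirely when the
 * requested value already matches what was last written on this CPU. Callers
 * pass force=true when the MSR must be updated immediately, e.g. when
 * x86_spec_ctrl_base itself changes; otherwise, with KERNEL_IBRS, the actual
 * write is left to the return-to-user path.
 */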
66 void write_spec_ctrl_current(u64 val, bool force)
67 {
68 if (this_cpu_read(x86_spec_ctrl_current) == val)
69 return;
70
71 this_cpu_write(x86_spec_ctrl_current, val);
72
73 /*
74 * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
75 * unless forced, the update can be delayed until that time.
76 */
77 if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
78 wrmsrl(MSR_IA32_SPEC_CTRL, val);
79 }
80
81 /*
82 * The vendor and possibly platform specific bits which can be modified in
83 * x86_spec_ctrl_base.
84 */
85 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
86
87 /*
88 * AMD specific MSR info for Speculative Store Bypass control.
89 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
90 */
91 u64 __ro_after_init x86_amd_ls_cfg_base;
92 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
93
94 /* Control conditional STIBP in switch_to() */
95 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
96 /* Control conditional IBPB in switch_mm() */
97 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
98 /* Control unconditional IBPB in switch_mm() */
99 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
100
101 /* Control MDS CPU buffer clear before returning to user space */
102 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
103 EXPORT_SYMBOL_GPL(mds_user_clear);
104 /* Control MDS CPU buffer clear before idling (halt, mwait) */
105 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
106 EXPORT_SYMBOL_GPL(mds_idle_clear);
107
108 /*
109 * Controls whether L1D flush based mitigations are enabled,
110 * based on hw features and the admin setting via the boot parameter;
111 * defaults to false.
112 */
113 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
114
115 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
116 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
117 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
118
119 void __init check_bugs(void)
120 {
121 identify_boot_cpu();
122
123 /*
124 * identify_boot_cpu() initialized SMT support information, let the
125 * core code know.
126 */
127 cpu_smt_check_topology();
128
129 if (!IS_ENABLED(CONFIG_SMP)) {
130 pr_info("CPU: ");
131 print_cpu_info(&boot_cpu_data);
132 }
133
134 /*
135 * Read the SPEC_CTRL MSR to account for reserved bits which may
136 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
137 * init code as it is not enumerated and depends on the family.
138 */
139 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
140 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
141
142 /* Allow STIBP in MSR_SPEC_CTRL if supported */
143 if (boot_cpu_has(X86_FEATURE_STIBP))
144 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
145
146 /* Select the proper CPU mitigations before patching alternatives: */
147 spectre_v1_select_mitigation();
148 retbleed_select_mitigation();
149 /*
150 * spectre_v2_select_mitigation() relies on the state set by
151 * retbleed_select_mitigation(); specifically the STIBP selection is
152 * forced for UNRET.
153 */
154 spectre_v2_select_mitigation();
155 ssb_select_mitigation();
156 l1tf_select_mitigation();
157 md_clear_select_mitigation();
158 srbds_select_mitigation();
159 l1d_flush_select_mitigation();
160
161 arch_smt_update();
162
163 #ifdef CONFIG_X86_32
164 /*
165 * Check whether we are able to run this kernel safely on SMP.
166 *
167 * - i386 is no longer supported.
168 * - In order to run on anything without a TSC, we need to be
169 * compiled for an i486.
170 */
171 if (boot_cpu_data.x86 < 4)
172 panic("Kernel requires i486+ for 'invlpg' and other features");
173
174 init_utsname()->machine[1] =
175 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
176 alternative_instructions();
177
178 fpu__init_check_bugs();
179 #else /* CONFIG_X86_64 */
180 alternative_instructions();
181
182 /*
183 * Make sure the first 2MB area is not mapped by huge pages.
184 * There are typically fixed size MTRRs in there and overlapping
185 * MTRRs into large pages causes slowdowns.
186 *
187 * Right now we don't do that with gbpages because there seems to be
188 * very little benefit for that case.
189 */
190 if (!direct_gbpages)
191 set_memory_4k((unsigned long)__va(0), 1);
192 #endif
193 }
194
195 void
196 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
197 {
198 u64 msrval, guestval, hostval = x86_spec_ctrl_base;
199 struct thread_info *ti = current_thread_info();
200
201 /* Is MSR_SPEC_CTRL implemented? */
202 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
203 /*
204 * Restrict guest_spec_ctrl to supported values. Clear the
205 * modifiable bits in the host base value and OR in the
206 * modifiable bits from the guest value.
207 */
208 guestval = hostval & ~x86_spec_ctrl_mask;
209 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
210
211 /* SSBD controlled in MSR_SPEC_CTRL */
212 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
213 static_cpu_has(X86_FEATURE_AMD_SSBD))
214 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
215
216 /* Conditional STIBP enabled? */
217 if (static_branch_unlikely(&switch_to_cond_stibp))
218 hostval |= stibp_tif_to_spec_ctrl(ti->flags);
219
220 if (hostval != guestval) {
221 msrval = setguest ? guestval : hostval;
222 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
223 }
224 }
225
226 /*
227 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
228 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
229 */
230 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
231 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
232 return;
233
234 /*
235 * If the host has SSBD mitigation enabled, force it in the host's
236 * virtual MSR value. If it's not permanently enabled, evaluate
237 * current's TIF_SSBD thread flag.
238 */
239 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
240 hostval = SPEC_CTRL_SSBD;
241 else
242 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
243
244 /* Sanitize the guest value */
245 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
246
247 if (hostval != guestval) {
248 unsigned long tif;
249
250 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
251 ssbd_spec_ctrl_to_tif(hostval);
252
253 speculation_ctrl_update(tif);
254 }
255 }
256 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
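/*
 * Illustrative call pattern (the exact call sites live in KVM): a hypervisor
 * brackets guest execution with this helper, e.g.
 *
 *   x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
 *   ... run the guest ...
 *   x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
 *
 * so the guest view of SPEC_CTRL/VIRT_SPEC_CTRL is installed on entry
 * (setguest == true) and the host values are restored on exit.
 */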
257
258 static void x86_amd_ssb_disable(void)
259 {
260 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
261
262 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
263 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
264 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
265 wrmsrl(MSR_AMD64_LS_CFG, msrval);
266 }
267
268 #undef pr_fmt
269 #define pr_fmt(fmt) "MDS: " fmt
270
271 /* Default mitigation for MDS-affected CPUs */
272 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
273 static bool mds_nosmt __ro_after_init = false;
274
275 static const char * const mds_strings[] = {
276 [MDS_MITIGATION_OFF] = "Vulnerable",
277 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
278 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
279 };
280
281 static void __init mds_select_mitigation(void)
282 {
283 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
284 mds_mitigation = MDS_MITIGATION_OFF;
285 return;
286 }
287
288 if (mds_mitigation == MDS_MITIGATION_FULL) {
289 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
290 mds_mitigation = MDS_MITIGATION_VMWERV;
291
292 static_branch_enable(&mds_user_clear);
293
294 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
295 (mds_nosmt || cpu_mitigations_auto_nosmt()))
296 cpu_smt_disable(false);
297 }
298 }
299
300 static int __init mds_cmdline(char *str)
301 {
302 if (!boot_cpu_has_bug(X86_BUG_MDS))
303 return 0;
304
305 if (!str)
306 return -EINVAL;
307
308 if (!strcmp(str, "off"))
309 mds_mitigation = MDS_MITIGATION_OFF;
310 else if (!strcmp(str, "full"))
311 mds_mitigation = MDS_MITIGATION_FULL;
312 else if (!strcmp(str, "full,nosmt")) {
313 mds_mitigation = MDS_MITIGATION_FULL;
314 mds_nosmt = true;
315 }
316
317 return 0;
318 }
319 early_param("mds", mds_cmdline);
320
321 #undef pr_fmt
322 #define pr_fmt(fmt) "TAA: " fmt
323
324 enum taa_mitigations {
325 TAA_MITIGATION_OFF,
326 TAA_MITIGATION_UCODE_NEEDED,
327 TAA_MITIGATION_VERW,
328 TAA_MITIGATION_TSX_DISABLED,
329 };
330
331 /* Default mitigation for TAA-affected CPUs */
332 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
333 static bool taa_nosmt __ro_after_init;
334
335 static const char * const taa_strings[] = {
336 [TAA_MITIGATION_OFF] = "Vulnerable",
337 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
338 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
339 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
340 };
341
342 static void __init taa_select_mitigation(void)
343 {
344 u64 ia32_cap;
345
346 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
347 taa_mitigation = TAA_MITIGATION_OFF;
348 return;
349 }
350
351 /* TSX previously disabled by tsx=off */
352 if (!boot_cpu_has(X86_FEATURE_RTM)) {
353 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
354 return;
355 }
356
357 if (cpu_mitigations_off()) {
358 taa_mitigation = TAA_MITIGATION_OFF;
359 return;
360 }
361
362 /*
363 * TAA mitigation via VERW is turned off if both
364 * tsx_async_abort=off and mds=off are specified.
365 */
366 if (taa_mitigation == TAA_MITIGATION_OFF &&
367 mds_mitigation == MDS_MITIGATION_OFF)
368 return;
369
370 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
371 taa_mitigation = TAA_MITIGATION_VERW;
372 else
373 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
374
375 /*
376 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
377 * A microcode update fixes this behavior to clear CPU buffers. It also
378 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
379 * ARCH_CAP_TSX_CTRL_MSR bit.
380 *
381 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
382 * update is required.
383 */
384 ia32_cap = x86_read_arch_cap_msr();
385 if ((ia32_cap & ARCH_CAP_MDS_NO) &&
386 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
387 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
388
389 /*
390 * TSX is enabled, select alternate mitigation for TAA which is
391 * the same as MDS. Enable MDS static branch to clear CPU buffers.
392 *
393 * For guests that can't determine whether the correct microcode is
394 * present on host, enable the mitigation for UCODE_NEEDED as well.
395 */
396 static_branch_enable(&mds_user_clear);
397
398 if (taa_nosmt || cpu_mitigations_auto_nosmt())
399 cpu_smt_disable(false);
400 }
401
402 static int __init tsx_async_abort_parse_cmdline(char *str)
403 {
404 if (!boot_cpu_has_bug(X86_BUG_TAA))
405 return 0;
406
407 if (!str)
408 return -EINVAL;
409
410 if (!strcmp(str, "off")) {
411 taa_mitigation = TAA_MITIGATION_OFF;
412 } else if (!strcmp(str, "full")) {
413 taa_mitigation = TAA_MITIGATION_VERW;
414 } else if (!strcmp(str, "full,nosmt")) {
415 taa_mitigation = TAA_MITIGATION_VERW;
416 taa_nosmt = true;
417 }
418
419 return 0;
420 }
421 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
422
423 #undef pr_fmt
424 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
425
426 enum mmio_mitigations {
427 MMIO_MITIGATION_OFF,
428 MMIO_MITIGATION_UCODE_NEEDED,
429 MMIO_MITIGATION_VERW,
430 };
431
432 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
433 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
434 static bool mmio_nosmt __ro_after_init = false;
435
436 static const char * const mmio_strings[] = {
437 [MMIO_MITIGATION_OFF] = "Vulnerable",
438 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
439 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
440 };
441
442 static void __init mmio_select_mitigation(void)
443 {
444 u64 ia32_cap;
445
446 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
447 cpu_mitigations_off()) {
448 mmio_mitigation = MMIO_MITIGATION_OFF;
449 return;
450 }
451
452 if (mmio_mitigation == MMIO_MITIGATION_OFF)
453 return;
454
455 ia32_cap = x86_read_arch_cap_msr();
456
457 /*
458 * Enable CPU buffer clear mitigation for host and VMM, if also affected
459 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
460 */
461 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
462 boot_cpu_has(X86_FEATURE_RTM)))
463 static_branch_enable(&mds_user_clear);
464 else
465 static_branch_enable(&mmio_stale_data_clear);
466
467 /*
468 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
469 * be propagated to uncore buffers, clearing the Fill buffers on idle
470 * is required irrespective of SMT state.
471 */
472 if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
473 static_branch_enable(&mds_idle_clear);
474
475 /*
476 * Check if the system has the right microcode.
477 *
478 * CPU Fill buffer clear mitigation is enumerated by either an explicit
479 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
480 * affected systems.
481 */
482 if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
483 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
484 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
485 !(ia32_cap & ARCH_CAP_MDS_NO)))
486 mmio_mitigation = MMIO_MITIGATION_VERW;
487 else
488 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
489
490 if (mmio_nosmt || cpu_mitigations_auto_nosmt())
491 cpu_smt_disable(false);
492 }
493
494 static int __init mmio_stale_data_parse_cmdline(char *str)
495 {
496 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
497 return 0;
498
499 if (!str)
500 return -EINVAL;
501
502 if (!strcmp(str, "off")) {
503 mmio_mitigation = MMIO_MITIGATION_OFF;
504 } else if (!strcmp(str, "full")) {
505 mmio_mitigation = MMIO_MITIGATION_VERW;
506 } else if (!strcmp(str, "full,nosmt")) {
507 mmio_mitigation = MMIO_MITIGATION_VERW;
508 mmio_nosmt = true;
509 }
510
511 return 0;
512 }
513 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
514
515 #undef pr_fmt
516 #define pr_fmt(fmt) "" fmt
517
518 static void __init md_clear_update_mitigation(void)
519 {
520 if (cpu_mitigations_off())
521 return;
522
523 if (!static_key_enabled(&mds_user_clear))
524 goto out;
525
526 /*
527 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
528 * mitigation, if necessary.
529 */
530 if (mds_mitigation == MDS_MITIGATION_OFF &&
531 boot_cpu_has_bug(X86_BUG_MDS)) {
532 mds_mitigation = MDS_MITIGATION_FULL;
533 mds_select_mitigation();
534 }
535 if (taa_mitigation == TAA_MITIGATION_OFF &&
536 boot_cpu_has_bug(X86_BUG_TAA)) {
537 taa_mitigation = TAA_MITIGATION_VERW;
538 taa_select_mitigation();
539 }
540 if (mmio_mitigation == MMIO_MITIGATION_OFF &&
541 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
542 mmio_mitigation = MMIO_MITIGATION_VERW;
543 mmio_select_mitigation();
544 }
545 out:
546 if (boot_cpu_has_bug(X86_BUG_MDS))
547 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
548 if (boot_cpu_has_bug(X86_BUG_TAA))
549 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
550 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
551 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
552 }
553
554 static void __init md_clear_select_mitigation(void)
555 {
556 mds_select_mitigation();
557 taa_select_mitigation();
558 mmio_select_mitigation();
559
560 /*
561 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
562 * and print their mitigation after MDS, TAA and MMIO Stale Data
563 * mitigation selection is done.
564 */
565 md_clear_update_mitigation();
566 }
567
568 #undef pr_fmt
569 #define pr_fmt(fmt) "SRBDS: " fmt
570
571 enum srbds_mitigations {
572 SRBDS_MITIGATION_OFF,
573 SRBDS_MITIGATION_UCODE_NEEDED,
574 SRBDS_MITIGATION_FULL,
575 SRBDS_MITIGATION_TSX_OFF,
576 SRBDS_MITIGATION_HYPERVISOR,
577 };
578
579 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
580
581 static const char * const srbds_strings[] = {
582 [SRBDS_MITIGATION_OFF] = "Vulnerable",
583 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
584 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
585 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
586 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
587 };
588
589 static bool srbds_off;
590
591 void update_srbds_msr(void)
592 {
593 u64 mcu_ctrl;
594
595 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
596 return;
597
598 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
599 return;
600
601 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
602 return;
603
604 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
605
606 switch (srbds_mitigation) {
607 case SRBDS_MITIGATION_OFF:
608 case SRBDS_MITIGATION_TSX_OFF:
609 mcu_ctrl |= RNGDS_MITG_DIS;
610 break;
611 case SRBDS_MITIGATION_FULL:
612 mcu_ctrl &= ~RNGDS_MITG_DIS;
613 break;
614 default:
615 break;
616 }
617
618 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
619 }
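/*
 * Note on the MSR bit above: RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL
 * *disables* the microcode's RDRAND/RDSEED sampling mitigation, so it is set
 * when the SRBDS mitigation is off (or unnecessary because TSX is off) and
 * cleared for SRBDS_MITIGATION_FULL.
 */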
620
621 static void __init srbds_select_mitigation(void)
622 {
623 u64 ia32_cap;
624
625 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
626 return;
627
628 /*
629 * Check to see if this is one of the MDS_NO systems supporting TSX that
630 * are only exposed to SRBDS when TSX is enabled or when the CPU is
631 * affected by the Processor MMIO Stale Data vulnerability.
632 */
633 ia32_cap = x86_read_arch_cap_msr();
634 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
635 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
636 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
637 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
638 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
639 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
640 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
641 else if (cpu_mitigations_off() || srbds_off)
642 srbds_mitigation = SRBDS_MITIGATION_OFF;
643
644 update_srbds_msr();
645 pr_info("%s\n", srbds_strings[srbds_mitigation]);
646 }
647
648 static int __init srbds_parse_cmdline(char *str)
649 {
650 if (!str)
651 return -EINVAL;
652
653 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
654 return 0;
655
656 srbds_off = !strcmp(str, "off");
657 return 0;
658 }
659 early_param("srbds", srbds_parse_cmdline);
660
661 #undef pr_fmt
662 #define pr_fmt(fmt) "L1D Flush : " fmt
663
664 enum l1d_flush_mitigations {
665 L1D_FLUSH_OFF = 0,
666 L1D_FLUSH_ON,
667 };
668
669 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
670
671 static void __init l1d_flush_select_mitigation(void)
672 {
673 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
674 return;
675
676 static_branch_enable(&switch_mm_cond_l1d_flush);
677 pr_info("Conditional flush on switch_mm() enabled\n");
678 }
679
680 static int __init l1d_flush_parse_cmdline(char *str)
681 {
682 if (!strcmp(str, "on"))
683 l1d_flush_mitigation = L1D_FLUSH_ON;
684
685 return 0;
686 }
687 early_param("l1d_flush", l1d_flush_parse_cmdline);
688
689 #undef pr_fmt
690 #define pr_fmt(fmt) "Spectre V1 : " fmt
691
692 enum spectre_v1_mitigation {
693 SPECTRE_V1_MITIGATION_NONE,
694 SPECTRE_V1_MITIGATION_AUTO,
695 };
696
697 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
698 SPECTRE_V1_MITIGATION_AUTO;
699
700 static const char * const spectre_v1_strings[] = {
701 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
702 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
703 };
704
705 /*
706 * Does SMAP provide full mitigation against speculative kernel access to
707 * userspace?
708 */
709 static bool smap_works_speculatively(void)
710 {
711 if (!boot_cpu_has(X86_FEATURE_SMAP))
712 return false;
713
714 /*
715 * On CPUs which are vulnerable to Meltdown, SMAP does not
716 * prevent speculative access to user data in the L1 cache.
717 * Consider SMAP to be non-functional as a mitigation on these
718 * CPUs.
719 */
720 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
721 return false;
722
723 return true;
724 }
725
726 static void __init spectre_v1_select_mitigation(void)
727 {
728 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
729 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
730 return;
731 }
732
733 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
734 /*
735 * With Spectre v1, a user can speculatively control either
736 * path of a conditional swapgs with a user-controlled GS
737 * value. The mitigation is to add lfences to both code paths.
738 *
739 * If FSGSBASE is enabled, the user can put a kernel address in
740 * GS, in which case SMAP provides no protection.
741 *
742 * If FSGSBASE is disabled, the user can only put a user space
743 * address in GS. That makes an attack harder, but still
744 * possible if there's no SMAP protection.
745 */
746 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
747 !smap_works_speculatively()) {
748 /*
749 * Mitigation can be provided from SWAPGS itself or
750 * PTI as the CR3 write in the Meltdown mitigation
751 * is serializing.
752 *
753 * If neither is there, mitigate with an LFENCE to
754 * stop speculation through swapgs.
755 */
756 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
757 !boot_cpu_has(X86_FEATURE_PTI))
758 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
759
760 /*
761 * Enable lfences in the kernel entry (non-swapgs)
762 * paths, to prevent user entry from speculatively
763 * skipping swapgs.
764 */
765 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
766 }
767 }
768
769 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
770 }
771
772 static int __init nospectre_v1_cmdline(char *str)
773 {
774 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
775 return 0;
776 }
777 early_param("nospectre_v1", nospectre_v1_cmdline);
778
779 #undef pr_fmt
780 #define pr_fmt(fmt) "RETBleed: " fmt
781
782 enum retbleed_mitigation {
783 RETBLEED_MITIGATION_NONE,
784 RETBLEED_MITIGATION_UNRET,
785 };
786
787 enum retbleed_mitigation_cmd {
788 RETBLEED_CMD_OFF,
789 RETBLEED_CMD_AUTO,
790 RETBLEED_CMD_UNRET,
791 };
792
793 const char * const retbleed_strings[] = {
794 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
795 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
796 };
797
798 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
799 RETBLEED_MITIGATION_NONE;
800 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
801 RETBLEED_CMD_AUTO;
802
803 static int __ro_after_init retbleed_nosmt = false;
804
805 static int __init retbleed_parse_cmdline(char *str)
806 {
807 if (!str)
808 return -EINVAL;
809
810 while (str) {
811 char *next = strchr(str, ',');
812 if (next) {
813 *next = 0;
814 next++;
815 }
816
817 if (!strcmp(str, "off")) {
818 retbleed_cmd = RETBLEED_CMD_OFF;
819 } else if (!strcmp(str, "auto")) {
820 retbleed_cmd = RETBLEED_CMD_AUTO;
821 } else if (!strcmp(str, "unret")) {
822 retbleed_cmd = RETBLEED_CMD_UNRET;
823 } else if (!strcmp(str, "nosmt")) {
824 retbleed_nosmt = true;
825 } else {
826 pr_err("Ignoring unknown retbleed option (%s).", str);
827 }
828
829 str = next;
830 }
831
832 return 0;
833 }
834 early_param("retbleed", retbleed_parse_cmdline);
835
836 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
837 #define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
838
839 static void __init retbleed_select_mitigation(void)
840 {
841 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
842 return;
843
844 switch (retbleed_cmd) {
845 case RETBLEED_CMD_OFF:
846 return;
847
848 case RETBLEED_CMD_UNRET:
849 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
850 break;
851
852 case RETBLEED_CMD_AUTO:
853 default:
854 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
855 break;
856
857 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
858 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
859 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
860 break;
861 }
862
863 switch (retbleed_mitigation) {
864 case RETBLEED_MITIGATION_UNRET:
865
866 if (!IS_ENABLED(CONFIG_RETPOLINE) ||
867 !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
868 pr_err(RETBLEED_COMPILER_MSG);
869 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
870 break;
871 }
872
873 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
874 setup_force_cpu_cap(X86_FEATURE_UNRET);
875
876 if (!boot_cpu_has(X86_FEATURE_STIBP) &&
877 (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
878 cpu_smt_disable(false);
879
880 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
881 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
882 pr_err(RETBLEED_UNTRAIN_MSG);
883 break;
884
885 default:
886 break;
887 }
888
889 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
890 }
891
892 #undef pr_fmt
893 #define pr_fmt(fmt) "Spectre V2 : " fmt
894
895 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
896 SPECTRE_V2_NONE;
897
898 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
899 SPECTRE_V2_USER_NONE;
900 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
901 SPECTRE_V2_USER_NONE;
902
903 #ifdef CONFIG_RETPOLINE
904 static bool spectre_v2_bad_module;
905
906 bool retpoline_module_ok(bool has_retpoline)
907 {
908 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
909 return true;
910
911 pr_err("System may be vulnerable to spectre v2\n");
912 spectre_v2_bad_module = true;
913 return false;
914 }
915
916 static inline const char *spectre_v2_module_string(void)
917 {
918 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
919 }
920 #else
921 static inline const char *spectre_v2_module_string(void) { return ""; }
922 #endif
923
924 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
925 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
926 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
927
928 #ifdef CONFIG_BPF_SYSCALL
929 void unpriv_ebpf_notify(int new_state)
930 {
931 if (new_state)
932 return;
933
934 /* Unprivileged eBPF is enabled */
935
936 switch (spectre_v2_enabled) {
937 case SPECTRE_V2_EIBRS:
938 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
939 break;
940 case SPECTRE_V2_EIBRS_LFENCE:
941 if (sched_smt_active())
942 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
943 break;
944 default:
945 break;
946 }
947 }
948 #endif
949
950 static inline bool match_option(const char *arg, int arglen, const char *opt)
951 {
952 int len = strlen(opt);
953
954 return len == arglen && !strncmp(arg, opt, len);
955 }
956
957 /* The kernel command line selection for spectre v2 */
958 enum spectre_v2_mitigation_cmd {
959 SPECTRE_V2_CMD_NONE,
960 SPECTRE_V2_CMD_AUTO,
961 SPECTRE_V2_CMD_FORCE,
962 SPECTRE_V2_CMD_RETPOLINE,
963 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
964 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
965 SPECTRE_V2_CMD_EIBRS,
966 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
967 SPECTRE_V2_CMD_EIBRS_LFENCE,
968 };
969
970 enum spectre_v2_user_cmd {
971 SPECTRE_V2_USER_CMD_NONE,
972 SPECTRE_V2_USER_CMD_AUTO,
973 SPECTRE_V2_USER_CMD_FORCE,
974 SPECTRE_V2_USER_CMD_PRCTL,
975 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
976 SPECTRE_V2_USER_CMD_SECCOMP,
977 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
978 };
979
980 static const char * const spectre_v2_user_strings[] = {
981 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
982 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
983 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
984 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
985 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
986 };
987
988 static const struct {
989 const char *option;
990 enum spectre_v2_user_cmd cmd;
991 bool secure;
992 } v2_user_options[] __initconst = {
993 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
994 { "off", SPECTRE_V2_USER_CMD_NONE, false },
995 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
996 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
997 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
998 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
999 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1000 };
1001
1002 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1003 {
1004 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1005 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1006 }
1007
1008 static enum spectre_v2_user_cmd __init
1009 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
1010 {
1011 char arg[20];
1012 int ret, i;
1013
1014 switch (v2_cmd) {
1015 case SPECTRE_V2_CMD_NONE:
1016 return SPECTRE_V2_USER_CMD_NONE;
1017 case SPECTRE_V2_CMD_FORCE:
1018 return SPECTRE_V2_USER_CMD_FORCE;
1019 default:
1020 break;
1021 }
1022
1023 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1024 arg, sizeof(arg));
1025 if (ret < 0)
1026 return SPECTRE_V2_USER_CMD_AUTO;
1027
1028 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1029 if (match_option(arg, ret, v2_user_options[i].option)) {
1030 spec_v2_user_print_cond(v2_user_options[i].option,
1031 v2_user_options[i].secure);
1032 return v2_user_options[i].cmd;
1033 }
1034 }
1035
1036 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1037 return SPECTRE_V2_USER_CMD_AUTO;
1038 }
1039
1040 static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
1041 {
1042 return (mode == SPECTRE_V2_EIBRS ||
1043 mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1044 mode == SPECTRE_V2_EIBRS_LFENCE);
1045 }
1046
1047 static void __init
1048 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
1049 {
1050 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1051 bool smt_possible = IS_ENABLED(CONFIG_SMP);
1052 enum spectre_v2_user_cmd cmd;
1053
1054 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1055 return;
1056
1057 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1058 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1059 smt_possible = false;
1060
1061 cmd = spectre_v2_parse_user_cmdline(v2_cmd);
1062 switch (cmd) {
1063 case SPECTRE_V2_USER_CMD_NONE:
1064 goto set_mode;
1065 case SPECTRE_V2_USER_CMD_FORCE:
1066 mode = SPECTRE_V2_USER_STRICT;
1067 break;
1068 case SPECTRE_V2_USER_CMD_PRCTL:
1069 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1070 mode = SPECTRE_V2_USER_PRCTL;
1071 break;
1072 case SPECTRE_V2_USER_CMD_AUTO:
1073 case SPECTRE_V2_USER_CMD_SECCOMP:
1074 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1075 if (IS_ENABLED(CONFIG_SECCOMP))
1076 mode = SPECTRE_V2_USER_SECCOMP;
1077 else
1078 mode = SPECTRE_V2_USER_PRCTL;
1079 break;
1080 }
1081
1082 /* Initialize Indirect Branch Prediction Barrier */
1083 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1084 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1085
1086 spectre_v2_user_ibpb = mode;
1087 switch (cmd) {
1088 case SPECTRE_V2_USER_CMD_FORCE:
1089 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1090 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1091 static_branch_enable(&switch_mm_always_ibpb);
1092 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1093 break;
1094 case SPECTRE_V2_USER_CMD_PRCTL:
1095 case SPECTRE_V2_USER_CMD_AUTO:
1096 case SPECTRE_V2_USER_CMD_SECCOMP:
1097 static_branch_enable(&switch_mm_cond_ibpb);
1098 break;
1099 default:
1100 break;
1101 }
1102
1103 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1104 static_key_enabled(&switch_mm_always_ibpb) ?
1105 "always-on" : "conditional");
1106 }
1107
1108 /*
1109 * If STIBP is not available, enhanced IBRS is enabled, or SMT is
1110 * impossible, then STIBP is not required.
1111 */
1112 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1113 !smt_possible ||
1114 spectre_v2_in_eibrs_mode(spectre_v2_enabled))
1115 return;
1116
1117 /*
1118 * At this point, an STIBP mode other than "off" has been set.
1119 * If STIBP support is not being forced, check if STIBP always-on
1120 * is preferred.
1121 */
1122 if (mode != SPECTRE_V2_USER_STRICT &&
1123 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1124 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1125
1126 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
1127 if (mode != SPECTRE_V2_USER_STRICT &&
1128 mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1129 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation'\n");
1130 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1131 }
1132
1133 spectre_v2_user_stibp = mode;
1134
1135 set_mode:
1136 pr_info("%s\n", spectre_v2_user_strings[mode]);
1137 }
1138
1139 static const char * const spectre_v2_strings[] = {
1140 [SPECTRE_V2_NONE] = "Vulnerable",
1141 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
1142 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
1143 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
1144 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
1145 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
1146 };
1147
1148 static const struct {
1149 const char *option;
1150 enum spectre_v2_mitigation_cmd cmd;
1151 bool secure;
1152 } mitigation_options[] __initconst = {
1153 { "off", SPECTRE_V2_CMD_NONE, false },
1154 { "on", SPECTRE_V2_CMD_FORCE, true },
1155 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
1156 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1157 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1158 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1159 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
1160 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
1161 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
1162 { "auto", SPECTRE_V2_CMD_AUTO, false },
1163 };
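/*
 * Illustrative kernel command line usage for the table above, e.g.
 * "spectre_v2=retpoline,lfence" or "spectre_v2=eibrs,retpoline". Unknown
 * strings fall back to SPECTRE_V2_CMD_AUTO in spectre_v2_parse_cmdline().
 */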
1164
1165 static void __init spec_v2_print_cond(const char *reason, bool secure)
1166 {
1167 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1168 pr_info("%s selected on command line.\n", reason);
1169 }
1170
1171 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1172 {
1173 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1174 char arg[20];
1175 int ret, i;
1176
1177 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1178 cpu_mitigations_off())
1179 return SPECTRE_V2_CMD_NONE;
1180
1181 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1182 if (ret < 0)
1183 return SPECTRE_V2_CMD_AUTO;
1184
1185 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1186 if (!match_option(arg, ret, mitigation_options[i].option))
1187 continue;
1188 cmd = mitigation_options[i].cmd;
1189 break;
1190 }
1191
1192 if (i >= ARRAY_SIZE(mitigation_options)) {
1193 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1194 return SPECTRE_V2_CMD_AUTO;
1195 }
1196
1197 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1198 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1199 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1200 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1201 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1202 !IS_ENABLED(CONFIG_RETPOLINE)) {
1203 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1204 mitigation_options[i].option);
1205 return SPECTRE_V2_CMD_AUTO;
1206 }
1207
1208 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1209 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1210 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1211 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1212 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
1213 mitigation_options[i].option);
1214 return SPECTRE_V2_CMD_AUTO;
1215 }
1216
1217 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1218 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1219 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1220 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1221 mitigation_options[i].option);
1222 return SPECTRE_V2_CMD_AUTO;
1223 }
1224
1225 spec_v2_print_cond(mitigation_options[i].option,
1226 mitigation_options[i].secure);
1227 return cmd;
1228 }
1229
1230 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1231 {
1232 if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1233 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1234 return SPECTRE_V2_NONE;
1235 }
1236
1237 return SPECTRE_V2_RETPOLINE;
1238 }
1239
1240 static void __init spectre_v2_select_mitigation(void)
1241 {
1242 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1243 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1244
1245 /*
1246 * If the CPU is not affected and the command line mode is NONE or AUTO
1247 * then nothing to do.
1248 */
1249 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1250 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1251 return;
1252
1253 switch (cmd) {
1254 case SPECTRE_V2_CMD_NONE:
1255 return;
1256
1257 case SPECTRE_V2_CMD_FORCE:
1258 case SPECTRE_V2_CMD_AUTO:
1259 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1260 mode = SPECTRE_V2_EIBRS;
1261 break;
1262 }
1263
1264 mode = spectre_v2_select_retpoline();
1265 break;
1266
1267 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1268 pr_err(SPECTRE_V2_LFENCE_MSG);
1269 mode = SPECTRE_V2_LFENCE;
1270 break;
1271
1272 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1273 mode = SPECTRE_V2_RETPOLINE;
1274 break;
1275
1276 case SPECTRE_V2_CMD_RETPOLINE:
1277 mode = spectre_v2_select_retpoline();
1278 break;
1279
1280 case SPECTRE_V2_CMD_EIBRS:
1281 mode = SPECTRE_V2_EIBRS;
1282 break;
1283
1284 case SPECTRE_V2_CMD_EIBRS_LFENCE:
1285 mode = SPECTRE_V2_EIBRS_LFENCE;
1286 break;
1287
1288 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1289 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1290 break;
1291 }
1292
1293 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1294 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1295
1296 if (spectre_v2_in_eibrs_mode(mode)) {
1297 /* Force it so VMEXIT will restore correctly */
1298 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1299 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1300 }
1301
1302 switch (mode) {
1303 case SPECTRE_V2_NONE:
1304 case SPECTRE_V2_EIBRS:
1305 break;
1306
1307 case SPECTRE_V2_LFENCE:
1308 case SPECTRE_V2_EIBRS_LFENCE:
1309 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1310 fallthrough;
1311
1312 case SPECTRE_V2_RETPOLINE:
1313 case SPECTRE_V2_EIBRS_RETPOLINE:
1314 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1315 break;
1316 }
1317
1318 spectre_v2_enabled = mode;
1319 pr_info("%s\n", spectre_v2_strings[mode]);
1320
1321 /*
1322 * If spectre v2 protection has been enabled, unconditionally fill
1323 * RSB during a context switch; this protects against two independent
1324 * issues:
1325 *
1326 * - RSB underflow (and switch to BTB) on Skylake+
1327 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
1328 */
1329 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1330 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1331
1332 /*
1333 * Retpoline means the kernel is safe because it has no indirect
1334 * branches. Enhanced IBRS protects firmware too, so enable restricted
1335 * speculation around firmware calls only when Enhanced IBRS isn't
1336 * supported.
1337 *
1338 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1339 * the user might select retpoline on the kernel command line and if
1340 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1341 * enable IBRS around firmware calls.
1342 */
1343 if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
1344 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1345 pr_info("Enabling Restricted Speculation for firmware calls\n");
1346 }
1347
1348 /* Set up IBPB and STIBP depending on the general spectre V2 command */
1349 spectre_v2_user_select_mitigation(cmd);
1350 }
1351
1352 static void update_stibp_msr(void * __unused)
1353 {
1354 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1355 }
1356
1357 /* Update x86_spec_ctrl_base in case SMT state changed. */
1358 static void update_stibp_strict(void)
1359 {
1360 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1361
1362 if (sched_smt_active())
1363 mask |= SPEC_CTRL_STIBP;
1364
1365 if (mask == x86_spec_ctrl_base)
1366 return;
1367
1368 pr_info("Update user space SMT mitigation: STIBP %s\n",
1369 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1370 x86_spec_ctrl_base = mask;
1371 on_each_cpu(update_stibp_msr, NULL, 1);
1372 }
1373
1374 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1375 static void update_indir_branch_cond(void)
1376 {
1377 if (sched_smt_active())
1378 static_branch_enable(&switch_to_cond_stibp);
1379 else
1380 static_branch_disable(&switch_to_cond_stibp);
1381 }
1382
1383 #undef pr_fmt
1384 #define pr_fmt(fmt) fmt
1385
1386 /* Update the static key controlling the MDS CPU buffer clear in idle */
1387 static void update_mds_branch_idle(void)
1388 {
1389 u64 ia32_cap = x86_read_arch_cap_msr();
1390
1391 /*
1392 * Enable the idle clearing if SMT is active on CPUs which are
1393 * affected only by MSBDS and not any other MDS variant.
1394 *
1395 * The other variants cannot be mitigated when SMT is enabled, so
1396 * clearing the buffers on idle just to prevent the Store Buffer
1397 * repartitioning leak would be a window dressing exercise.
1398 */
1399 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1400 return;
1401
1402 if (sched_smt_active()) {
1403 static_branch_enable(&mds_idle_clear);
1404 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1405 (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1406 static_branch_disable(&mds_idle_clear);
1407 }
1408 }
1409
1410 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1411 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1412 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1413
1414 void cpu_bugs_smt_update(void)
1415 {
1416 mutex_lock(&spec_ctrl_mutex);
1417
1418 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1419 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1420 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1421
1422 switch (spectre_v2_user_stibp) {
1423 case SPECTRE_V2_USER_NONE:
1424 break;
1425 case SPECTRE_V2_USER_STRICT:
1426 case SPECTRE_V2_USER_STRICT_PREFERRED:
1427 update_stibp_strict();
1428 break;
1429 case SPECTRE_V2_USER_PRCTL:
1430 case SPECTRE_V2_USER_SECCOMP:
1431 update_indir_branch_cond();
1432 break;
1433 }
1434
1435 switch (mds_mitigation) {
1436 case MDS_MITIGATION_FULL:
1437 case MDS_MITIGATION_VMWERV:
1438 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1439 pr_warn_once(MDS_MSG_SMT);
1440 update_mds_branch_idle();
1441 break;
1442 case MDS_MITIGATION_OFF:
1443 break;
1444 }
1445
1446 switch (taa_mitigation) {
1447 case TAA_MITIGATION_VERW:
1448 case TAA_MITIGATION_UCODE_NEEDED:
1449 if (sched_smt_active())
1450 pr_warn_once(TAA_MSG_SMT);
1451 break;
1452 case TAA_MITIGATION_TSX_DISABLED:
1453 case TAA_MITIGATION_OFF:
1454 break;
1455 }
1456
1457 switch (mmio_mitigation) {
1458 case MMIO_MITIGATION_VERW:
1459 case MMIO_MITIGATION_UCODE_NEEDED:
1460 if (sched_smt_active())
1461 pr_warn_once(MMIO_MSG_SMT);
1462 break;
1463 case MMIO_MITIGATION_OFF:
1464 break;
1465 }
1466
1467 mutex_unlock(&spec_ctrl_mutex);
1468 }
1469
1470 #undef pr_fmt
1471 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1472
1473 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1474
1475 /* The kernel command line selection */
1476 enum ssb_mitigation_cmd {
1477 SPEC_STORE_BYPASS_CMD_NONE,
1478 SPEC_STORE_BYPASS_CMD_AUTO,
1479 SPEC_STORE_BYPASS_CMD_ON,
1480 SPEC_STORE_BYPASS_CMD_PRCTL,
1481 SPEC_STORE_BYPASS_CMD_SECCOMP,
1482 };
1483
1484 static const char * const ssb_strings[] = {
1485 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1486 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1487 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1488 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1489 };
1490
1491 static const struct {
1492 const char *option;
1493 enum ssb_mitigation_cmd cmd;
1494 } ssb_mitigation_options[] __initconst = {
1495 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
1496 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
1497 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
1498 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
1499 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1500 };
1501
1502 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1503 {
1504 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1505 char arg[20];
1506 int ret, i;
1507
1508 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1509 cpu_mitigations_off()) {
1510 return SPEC_STORE_BYPASS_CMD_NONE;
1511 } else {
1512 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1513 arg, sizeof(arg));
1514 if (ret < 0)
1515 return SPEC_STORE_BYPASS_CMD_AUTO;
1516
1517 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1518 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1519 continue;
1520
1521 cmd = ssb_mitigation_options[i].cmd;
1522 break;
1523 }
1524
1525 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1526 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1527 return SPEC_STORE_BYPASS_CMD_AUTO;
1528 }
1529 }
1530
1531 return cmd;
1532 }
1533
1534 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1535 {
1536 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1537 enum ssb_mitigation_cmd cmd;
1538
1539 if (!boot_cpu_has(X86_FEATURE_SSBD))
1540 return mode;
1541
1542 cmd = ssb_parse_cmdline();
1543 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1544 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1545 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1546 return mode;
1547
1548 switch (cmd) {
1549 case SPEC_STORE_BYPASS_CMD_AUTO:
1550 case SPEC_STORE_BYPASS_CMD_SECCOMP:
1551 /*
1552 * Choose prctl+seccomp as the default mode if seccomp is
1553 * enabled.
1554 */
1555 if (IS_ENABLED(CONFIG_SECCOMP))
1556 mode = SPEC_STORE_BYPASS_SECCOMP;
1557 else
1558 mode = SPEC_STORE_BYPASS_PRCTL;
1559 break;
1560 case SPEC_STORE_BYPASS_CMD_ON:
1561 mode = SPEC_STORE_BYPASS_DISABLE;
1562 break;
1563 case SPEC_STORE_BYPASS_CMD_PRCTL:
1564 mode = SPEC_STORE_BYPASS_PRCTL;
1565 break;
1566 case SPEC_STORE_BYPASS_CMD_NONE:
1567 break;
1568 }
1569
1570 /*
1571 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
1572 * bit in the mask to allow guests to use the mitigation even in the
1573 * case where the host does not enable it.
1574 */
1575 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1576 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1577 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1578 }
1579
1580 /*
1581 * We have three CPU feature flags that are in play here:
1582 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1583 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1584 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1585 */
1586 if (mode == SPEC_STORE_BYPASS_DISABLE) {
1587 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1588 /*
1589 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1590 * use a completely different MSR and bit dependent on family.
1591 */
1592 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1593 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1594 x86_amd_ssb_disable();
1595 } else {
1596 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1597 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1598 }
1599 }
1600
1601 return mode;
1602 }
1603
1604 static void ssb_select_mitigation(void)
1605 {
1606 ssb_mode = __ssb_select_mitigation();
1607
1608 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1609 pr_info("%s\n", ssb_strings[ssb_mode]);
1610 }
1611
1612 #undef pr_fmt
1613 #define pr_fmt(fmt) "Speculation prctl: " fmt
1614
1615 static void task_update_spec_tif(struct task_struct *tsk)
1616 {
1617 /* Force the update of the real TIF bits */
1618 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1619
1620 /*
1621 * Immediately update the speculation control MSRs for the current
1622 * task, but for a non-current task delay setting the CPU
1623 * mitigation until it is scheduled next.
1624 *
1625 * This can only happen for SECCOMP mitigation. For PRCTL it's
1626 * always the current task.
1627 */
1628 if (tsk == current)
1629 speculation_ctrl_update_current();
1630 }
1631
1632 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1633 {
1634
1635 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1636 return -EPERM;
1637
1638 switch (ctrl) {
1639 case PR_SPEC_ENABLE:
1640 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1641 return 0;
1642 case PR_SPEC_DISABLE:
1643 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1644 return 0;
1645 default:
1646 return -ERANGE;
1647 }
1648 }
1649
1650 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1651 {
1652 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1653 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1654 return -ENXIO;
1655
1656 switch (ctrl) {
1657 case PR_SPEC_ENABLE:
1658 /* If speculation is force disabled, enable is not allowed */
1659 if (task_spec_ssb_force_disable(task))
1660 return -EPERM;
1661 task_clear_spec_ssb_disable(task);
1662 task_clear_spec_ssb_noexec(task);
1663 task_update_spec_tif(task);
1664 break;
1665 case PR_SPEC_DISABLE:
1666 task_set_spec_ssb_disable(task);
1667 task_clear_spec_ssb_noexec(task);
1668 task_update_spec_tif(task);
1669 break;
1670 case PR_SPEC_FORCE_DISABLE:
1671 task_set_spec_ssb_disable(task);
1672 task_set_spec_ssb_force_disable(task);
1673 task_clear_spec_ssb_noexec(task);
1674 task_update_spec_tif(task);
1675 break;
1676 case PR_SPEC_DISABLE_NOEXEC:
1677 if (task_spec_ssb_force_disable(task))
1678 return -EPERM;
1679 task_set_spec_ssb_disable(task);
1680 task_set_spec_ssb_noexec(task);
1681 task_update_spec_tif(task);
1682 break;
1683 default:
1684 return -ERANGE;
1685 }
1686 return 0;
1687 }
1688
1689 static bool is_spec_ib_user_controlled(void)
1690 {
1691 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1692 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1693 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1694 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
1695 }
1696
1697 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1698 {
1699 switch (ctrl) {
1700 case PR_SPEC_ENABLE:
1701 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1702 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1703 return 0;
1704
1705 /*
1706 * With strict mode for both IBPB and STIBP, the instruction
1707 * code paths avoid checking this task flag and instead,
1708 * unconditionally run the instruction. However, STIBP and IBPB
1709 * are independent and either can be set to conditionally
1710 * enabled regardless of the mode of the other.
1711 *
1712 * If either is set to conditional, allow the task flag to be
1713 * updated, unless it was force-disabled by a previous prctl
1714 * call. Currently, this is possible on an AMD CPU which has the
1715 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
1716 * kernel is booted with 'spectre_v2_user=seccomp', then
1717 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
1718 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
1719 */
1720 if (!is_spec_ib_user_controlled() ||
1721 task_spec_ib_force_disable(task))
1722 return -EPERM;
1723
1724 task_clear_spec_ib_disable(task);
1725 task_update_spec_tif(task);
1726 break;
1727 case PR_SPEC_DISABLE:
1728 case PR_SPEC_FORCE_DISABLE:
1729 /*
1730 * Indirect branch speculation is always allowed when
1731 * mitigation is force disabled.
1732 */
1733 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1734 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1735 return -EPERM;
1736
1737 if (!is_spec_ib_user_controlled())
1738 return 0;
1739
1740 task_set_spec_ib_disable(task);
1741 if (ctrl == PR_SPEC_FORCE_DISABLE)
1742 task_set_spec_ib_force_disable(task);
1743 task_update_spec_tif(task);
1744 break;
1745 default:
1746 return -ERANGE;
1747 }
1748 return 0;
1749 }
1750
1751 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1752 unsigned long ctrl)
1753 {
1754 switch (which) {
1755 case PR_SPEC_STORE_BYPASS:
1756 return ssb_prctl_set(task, ctrl);
1757 case PR_SPEC_INDIRECT_BRANCH:
1758 return ib_prctl_set(task, ctrl);
1759 case PR_SPEC_L1D_FLUSH:
1760 return l1d_flush_prctl_set(task, ctrl);
1761 default:
1762 return -ENODEV;
1763 }
1764 }
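/*
 * Illustrative userspace usage of the hooks above, via the generic prctl()
 * syscall (see Documentation/userspace-api/spec_ctrl.rst):
 *
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *         PR_SPEC_DISABLE, 0, 0);
 *   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 */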
1765
1766 #ifdef CONFIG_SECCOMP
1767 void arch_seccomp_spec_mitigate(struct task_struct *task)
1768 {
1769 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1770 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1771 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1772 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1773 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1774 }
1775 #endif
1776
1777 static int l1d_flush_prctl_get(struct task_struct *task)
1778 {
1779 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1780 return PR_SPEC_FORCE_DISABLE;
1781
1782 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
1783 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1784 else
1785 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1786 }
1787
1788 static int ssb_prctl_get(struct task_struct *task)
1789 {
1790 switch (ssb_mode) {
1791 case SPEC_STORE_BYPASS_DISABLE:
1792 return PR_SPEC_DISABLE;
1793 case SPEC_STORE_BYPASS_SECCOMP:
1794 case SPEC_STORE_BYPASS_PRCTL:
1795 if (task_spec_ssb_force_disable(task))
1796 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1797 if (task_spec_ssb_noexec(task))
1798 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
1799 if (task_spec_ssb_disable(task))
1800 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1801 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1802 default:
1803 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1804 return PR_SPEC_ENABLE;
1805 return PR_SPEC_NOT_AFFECTED;
1806 }
1807 }
1808
1809 static int ib_prctl_get(struct task_struct *task)
1810 {
1811 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1812 return PR_SPEC_NOT_AFFECTED;
1813
1814 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1815 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1816 return PR_SPEC_ENABLE;
1817 else if (is_spec_ib_user_controlled()) {
1818 if (task_spec_ib_force_disable(task))
1819 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1820 if (task_spec_ib_disable(task))
1821 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1822 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1823 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1824 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1825 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1826 return PR_SPEC_DISABLE;
1827 else
1828 return PR_SPEC_NOT_AFFECTED;
1829 }
1830
1831 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1832 {
1833 switch (which) {
1834 case PR_SPEC_STORE_BYPASS:
1835 return ssb_prctl_get(task);
1836 case PR_SPEC_INDIRECT_BRANCH:
1837 return ib_prctl_get(task);
1838 case PR_SPEC_L1D_FLUSH:
1839 return l1d_flush_prctl_get(task);
1840 default:
1841 return -ENODEV;
1842 }
1843 }
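
/*
 * Read-side counterpart, again as a sketch: prctl(PR_GET_SPECULATION_CTRL)
 * hands back the PR_SPEC_* bits composed by the *_prctl_get() helpers
 * above (the decoding below is simplified and ignores DISABLE_NOEXEC).
 */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                          0, 0, 0);

        if (state < 0) {
                perror("PR_GET_SPECULATION_CTRL");
                return 1;
        }

        printf("per-task control: %s, speculation: %s\n",
               (state & PR_SPEC_PRCTL) ? "available" : "unavailable",
               (state & (PR_SPEC_DISABLE | PR_SPEC_FORCE_DISABLE)) ?
                        "disabled" : "allowed");
        return 0;
}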
1844
1845 void x86_spec_ctrl_setup_ap(void)
1846 {
1847 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1848 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1849
1850 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1851 x86_amd_ssb_disable();
1852 }
1853
1854 bool itlb_multihit_kvm_mitigation;
1855 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1856
1857 #undef pr_fmt
1858 #define pr_fmt(fmt) "L1TF: " fmt
1859
1860 /* Default mitigation for L1TF-affected CPUs */
1861 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1862 #if IS_ENABLED(CONFIG_KVM_INTEL)
1863 EXPORT_SYMBOL_GPL(l1tf_mitigation);
1864 #endif
1865 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1866 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1867
1868 /*
1869 * These CPUs all support a 44-bit physical address space internally in the
1870 * cache, but CPUID can report a smaller number of physical address bits.
1871 *
1872 * The L1TF mitigation uses the topmost address bit for the inversion of
1873 * non-present PTEs. When the installed memory reaches into the topmost
1874 * address bit due to memory holes, which has been observed on machines
1875 * that report 36 physical address bits and have 32G of RAM installed,
1876 * the mitigation range check in l1tf_select_mitigation() triggers.
1877 * This is a false positive because the mitigation is still possible, as
1878 * the cache uses 44 bits internally. Use the cache bits
1879 * instead of the reported physical bits and adjust them on the affected
1880 * machines to 44 if the reported bits are less than 44.
1881 */
1882 static void override_cache_bits(struct cpuinfo_x86 *c)
1883 {
1884 if (c->x86 != 6)
1885 return;
1886
1887 switch (c->x86_model) {
1888 case INTEL_FAM6_NEHALEM:
1889 case INTEL_FAM6_WESTMERE:
1890 case INTEL_FAM6_SANDYBRIDGE:
1891 case INTEL_FAM6_IVYBRIDGE:
1892 case INTEL_FAM6_HASWELL:
1893 case INTEL_FAM6_HASWELL_L:
1894 case INTEL_FAM6_HASWELL_G:
1895 case INTEL_FAM6_BROADWELL:
1896 case INTEL_FAM6_BROADWELL_G:
1897 case INTEL_FAM6_SKYLAKE_L:
1898 case INTEL_FAM6_SKYLAKE:
1899 case INTEL_FAM6_KABYLAKE_L:
1900 case INTEL_FAM6_KABYLAKE:
1901 if (c->x86_cache_bits < 44)
1902 c->x86_cache_bits = 44;
1903 break;
1904 }
1905 }
1906
1907 static void __init l1tf_select_mitigation(void)
1908 {
1909 u64 half_pa;
1910
1911 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1912 return;
1913
1914 if (cpu_mitigations_off())
1915 l1tf_mitigation = L1TF_MITIGATION_OFF;
1916 else if (cpu_mitigations_auto_nosmt())
1917 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1918
1919 override_cache_bits(&boot_cpu_data);
1920
1921 switch (l1tf_mitigation) {
1922 case L1TF_MITIGATION_OFF:
1923 case L1TF_MITIGATION_FLUSH_NOWARN:
1924 case L1TF_MITIGATION_FLUSH:
1925 break;
1926 case L1TF_MITIGATION_FLUSH_NOSMT:
1927 case L1TF_MITIGATION_FULL:
1928 cpu_smt_disable(false);
1929 break;
1930 case L1TF_MITIGATION_FULL_FORCE:
1931 cpu_smt_disable(true);
1932 break;
1933 }
1934
1935 #if CONFIG_PGTABLE_LEVELS == 2
1936 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1937 return;
1938 #endif
1939
1940 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1941 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1942 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1943 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1944 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1945 half_pa);
1946 pr_info("However, doing so will make a part of your RAM unusable.\n");
1947 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1948 return;
1949 }
1950
1951 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1952 }
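
/*
 * Rough numbers behind the MAX_PA/2 check above, assuming l1tf_pfn_limit()
 * is BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT) as in <asm/processor.h>, so
 * half_pa == 2^(x86_cache_bits - 1).  A standalone sketch of the two cases
 * from the override_cache_bits() comment:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* reported 36 bits vs. the 44-bit override */
        unsigned int cache_bits[] = { 36, 44 };

        for (int i = 0; i < 2; i++) {
                uint64_t half_pa = UINT64_C(1) << (cache_bits[i] - 1);

                /* 36 -> 32 GiB (reachable with 32G RAM plus holes), 44 -> 8192 GiB */
                printf("x86_cache_bits=%u: half_pa=%llu GiB\n", cache_bits[i],
                       (unsigned long long)(half_pa >> 30));
        }
        return 0;
}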
1953
1954 static int __init l1tf_cmdline(char *str)
1955 {
1956 if (!boot_cpu_has_bug(X86_BUG_L1TF))
1957 return 0;
1958
1959 if (!str)
1960 return -EINVAL;
1961
1962 if (!strcmp(str, "off"))
1963 l1tf_mitigation = L1TF_MITIGATION_OFF;
1964 else if (!strcmp(str, "flush,nowarn"))
1965 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1966 else if (!strcmp(str, "flush"))
1967 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1968 else if (!strcmp(str, "flush,nosmt"))
1969 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1970 else if (!strcmp(str, "full"))
1971 l1tf_mitigation = L1TF_MITIGATION_FULL;
1972 else if (!strcmp(str, "full,force"))
1973 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1974
1975 return 0;
1976 }
1977 early_param("l1tf", l1tf_cmdline);
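
/*
 * The strings accepted by l1tf_cmdline() are passed on the kernel command
 * line, e.g. "l1tf=flush,nosmt" or "l1tf=full,force".  A small userspace
 * sketch to confirm what was actually booted, via /proc/cmdline:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char cmdline[4096] = "";
        FILE *f = fopen("/proc/cmdline", "r");

        if (!f) {
                perror("/proc/cmdline");
                return 1;
        }
        if (!fgets(cmdline, sizeof(cmdline), f))
                cmdline[0] = '\0';
        fclose(f);

        const char *opt = strstr(cmdline, "l1tf=");
        if (opt)
                printf("%.*s\n", (int)strcspn(opt, " \n"), opt);
        else
                printf("l1tf= not given; default is L1TF_MITIGATION_FLUSH\n");
        return 0;
}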
1978
1979 #undef pr_fmt
1980 #define pr_fmt(fmt) fmt
1981
1982 #ifdef CONFIG_SYSFS
1983
1984 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1985
1986 #if IS_ENABLED(CONFIG_KVM_INTEL)
1987 static const char * const l1tf_vmx_states[] = {
1988 [VMENTER_L1D_FLUSH_AUTO] = "auto",
1989 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
1990 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
1991 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
1992 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
1993 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
1994 };
1995
1996 static ssize_t l1tf_show_state(char *buf)
1997 {
1998 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1999 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2000
2001 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2002 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2003 sched_smt_active())) {
2004 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2005 l1tf_vmx_states[l1tf_vmx_mitigation]);
2006 }
2007
2008 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2009 l1tf_vmx_states[l1tf_vmx_mitigation],
2010 sched_smt_active() ? "vulnerable" : "disabled");
2011 }
2012
2013 static ssize_t itlb_multihit_show_state(char *buf)
2014 {
2015 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2016 !boot_cpu_has(X86_FEATURE_VMX))
2017 return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
2018 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2019 return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
2020 else if (itlb_multihit_kvm_mitigation)
2021 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
2022 else
2023 return sprintf(buf, "KVM: Vulnerable\n");
2024 }
2025 #else
2026 static ssize_t l1tf_show_state(char *buf)
2027 {
2028 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2029 }
2030
2031 static ssize_t itlb_multihit_show_state(char *buf)
2032 {
2033 return sprintf(buf, "Processor vulnerable\n");
2034 }
2035 #endif
2036
2037 static ssize_t mds_show_state(char *buf)
2038 {
2039 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2040 return sprintf(buf, "%s; SMT Host state unknown\n",
2041 mds_strings[mds_mitigation]);
2042 }
2043
2044 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2045 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2046 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2047 sched_smt_active() ? "mitigated" : "disabled"));
2048 }
2049
2050 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2051 sched_smt_active() ? "vulnerable" : "disabled");
2052 }
2053
2054 static ssize_t tsx_async_abort_show_state(char *buf)
2055 {
2056 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2057 (taa_mitigation == TAA_MITIGATION_OFF))
2058 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
2059
2060 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2061 return sprintf(buf, "%s; SMT Host state unknown\n",
2062 taa_strings[taa_mitigation]);
2063 }
2064
2065 return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2066 sched_smt_active() ? "vulnerable" : "disabled");
2067 }
2068
2069 static ssize_t mmio_stale_data_show_state(char *buf)
2070 {
2071 if (mmio_mitigation == MMIO_MITIGATION_OFF)
2072 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2073
2074 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2075 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2076 mmio_strings[mmio_mitigation]);
2077 }
2078
2079 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2080 sched_smt_active() ? "vulnerable" : "disabled");
2081 }
2082
2083 static char *stibp_state(void)
2084 {
2085 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
2086 return "";
2087
2088 switch (spectre_v2_user_stibp) {
2089 case SPECTRE_V2_USER_NONE:
2090 return ", STIBP: disabled";
2091 case SPECTRE_V2_USER_STRICT:
2092 return ", STIBP: forced";
2093 case SPECTRE_V2_USER_STRICT_PREFERRED:
2094 return ", STIBP: always-on";
2095 case SPECTRE_V2_USER_PRCTL:
2096 case SPECTRE_V2_USER_SECCOMP:
2097 if (static_key_enabled(&switch_to_cond_stibp))
2098 return ", STIBP: conditional";
2099 }
2100 return "";
2101 }
2102
2103 static char *ibpb_state(void)
2104 {
2105 if (boot_cpu_has(X86_FEATURE_IBPB)) {
2106 if (static_key_enabled(&switch_mm_always_ibpb))
2107 return ", IBPB: always-on";
2108 if (static_key_enabled(&switch_mm_cond_ibpb))
2109 return ", IBPB: conditional";
2110 return ", IBPB: disabled";
2111 }
2112 return "";
2113 }
2114
2115 static ssize_t spectre_v2_show_state(char *buf)
2116 {
2117 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2118 return sprintf(buf, "Vulnerable: LFENCE\n");
2119
2120 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2121 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2122
2123 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2124 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2125 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2126
2127 return sprintf(buf, "%s%s%s%s%s%s\n",
2128 spectre_v2_strings[spectre_v2_enabled],
2129 ibpb_state(),
2130 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2131 stibp_state(),
2132 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2133 spectre_v2_module_string());
2134 }
2135
2136 static ssize_t srbds_show_state(char *buf)
2137 {
2138 return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
2139 }
2140
2141 static ssize_t retbleed_show_state(char *buf)
2142 {
2143 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) {
2144 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2145 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2146 return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n");
2147
2148 return sprintf(buf, "%s; SMT %s\n",
2149 retbleed_strings[retbleed_mitigation],
2150 !sched_smt_active() ? "disabled" :
2151 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2152 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2153 "enabled with STIBP protection" : "vulnerable");
2154 }
2155
2156 return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2157 }
2158
2159 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2160 char *buf, unsigned int bug)
2161 {
2162 if (!boot_cpu_has_bug(bug))
2163 return sprintf(buf, "Not affected\n");
2164
2165 switch (bug) {
2166 case X86_BUG_CPU_MELTDOWN:
2167 if (boot_cpu_has(X86_FEATURE_PTI))
2168 return sprintf(buf, "Mitigation: PTI\n");
2169
2170 if (hypervisor_is_type(X86_HYPER_XEN_PV))
2171 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2172
2173 break;
2174
2175 case X86_BUG_SPECTRE_V1:
2176 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2177
2178 case X86_BUG_SPECTRE_V2:
2179 return spectre_v2_show_state(buf);
2180
2181 case X86_BUG_SPEC_STORE_BYPASS:
2182 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2183
2184 case X86_BUG_L1TF:
2185 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2186 return l1tf_show_state(buf);
2187 break;
2188
2189 case X86_BUG_MDS:
2190 return mds_show_state(buf);
2191
2192 case X86_BUG_TAA:
2193 return tsx_async_abort_show_state(buf);
2194
2195 case X86_BUG_ITLB_MULTIHIT:
2196 return itlb_multihit_show_state(buf);
2197
2198 case X86_BUG_SRBDS:
2199 return srbds_show_state(buf);
2200
2201 case X86_BUG_MMIO_STALE_DATA:
2202 return mmio_stale_data_show_state(buf);
2203
2204 case X86_BUG_RETBLEED:
2205 return retbleed_show_state(buf);
2206
2207 default:
2208 break;
2209 }
2210
2211 return sprintf(buf, "Vulnerable\n");
2212 }
2213
2214 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2215 {
2216 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2217 }
2218
2219 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2220 {
2221 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2222 }
2223
2224 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2225 {
2226 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2227 }
2228
2229 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2230 {
2231 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2232 }
2233
2234 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2235 {
2236 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2237 }
2238
2239 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2240 {
2241 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2242 }
2243
2244 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2245 {
2246 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2247 }
2248
2249 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2250 {
2251 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2252 }
2253
2254 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2255 {
2256 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2257 }
2258
2259 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2260 {
2261 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2262 }
2263
2264 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2265 {
2266 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2267 }
2268 #endif
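
/*
 * The cpu_show_*() handlers above back the files in
 * /sys/devices/system/cpu/vulnerabilities/ (the attribute wiring lives in
 * drivers/base/cpu.c); the file names below are assumed to mirror the
 * handler names.  A minimal sketch that dumps whichever of them exist:
 */
#include <stdio.h>

int main(void)
{
        static const char * const names[] = {
                "meltdown", "spectre_v1", "spectre_v2", "spec_store_bypass",
                "l1tf", "mds", "tsx_async_abort", "itlb_multihit", "srbds",
                "mmio_stale_data", "retbleed",
        };
        char path[128], line[256];

        for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                snprintf(path, sizeof(path),
                         "/sys/devices/system/cpu/vulnerabilities/%s", names[i]);

                FILE *f = fopen(path, "r");
                if (!f)
                        continue;       /* not exposed on this kernel */
                if (fgets(line, sizeof(line), f))
                        printf("%-18s%s", names[i], line);
                fclose(f);
        }
        return 0;
}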