/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_rds_mask;

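/*
 * check_bugs() is the boot-time entry point (invoked from start_kernel())
 * where the mitigations below are selected before the alternatives are
 * patched in.
 */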
void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (ibrs_inuse)
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Select the proper Spectre v2 mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed-size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems to be
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

void x86_spec_ctrl_set(u64 val)
{
        if (val & x86_spec_ctrl_mask)
                WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
        else
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
        u64 msrval = x86_spec_ctrl_base;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
        return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

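/*
 * Switch the SPEC_CTRL MSR to the guest's value on guest entry. The caller
 * (presumably the hypervisor's vcpu entry path, e.g. KVM) passes the guest's
 * SPEC_CTRL value; the MSR is only written when it differs from the host
 * value, avoiding a redundant and expensive MSR write.
 */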
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
        u64 host = x86_spec_ctrl_base;

        if (!ibrs_inuse)
                return;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

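/*
 * Counterpart of x86_spec_ctrl_set_guest(): restore the host's SPEC_CTRL
 * value on guest exit, again skipping the MSR write when the guest and host
 * values already match.
 */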
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
        u64 host = x86_spec_ctrl_base;

        if (!ibrs_inuse)
                return;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

static void x86_amd_rds_enable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;

        if (boot_cpu_has(X86_FEATURE_AMD_RDS))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
                                  sizeof(arg));
        if (ret > 0) {
                if (match_option(arg, ret, "off")) {
                        goto disable;
                } else if (match_option(arg, ret, "on")) {
                        spec2_print_if_secure("force enabled on command line.");
                        return SPECTRE_V2_CMD_FORCE;
                } else if (match_option(arg, ret, "retpoline")) {
                        spec2_print_if_insecure("retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE;
                } else if (match_option(arg, ret, "retpoline,amd")) {
                        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                                return SPECTRE_V2_CMD_AUTO;
                        }
                        spec2_print_if_insecure("AMD retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_AMD;
                } else if (match_option(arg, ret, "retpoline,generic")) {
                        spec2_print_if_insecure("generic retpoline selected on command line.");
                        return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
                } else if (match_option(arg, ret, "auto")) {
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_AUTO;
disable:
        spec2_print_if_insecure("disabled on command line.");
        return SPECTRE_V2_CMD_NONE;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
                /* FALLTHRU */
        case SPECTRE_V2_CMD_AUTO:
                goto retpoline_auto;

        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("LFENCE not serializing. Switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        pr_info("Speculation control IBPB %s IBRS %s\n",
                ibpb_supported ? "supported" : "not-supported",
                ibrs_supported ? "supported" : "not-supported");

        /*
         * If we are running with a full retpoline, IBRS is not also needed
         * in kernel mode, so default it to disabled.
         */
        if (mode == SPECTRE_V2_RETPOLINE_AMD ||
            mode == SPECTRE_V2_RETPOLINE_GENERIC) {
                if (ibrs_supported) {
                        pr_info("Retpoline compiled kernel. Defaulting IBRS to disabled\n");
                        set_ibrs_disabled();
                        if (!ibrs_inuse)
                                sysctl_ibrs_enabled = 0;
                }
        }

        /*
         * If neither SMEP nor KPTI are available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this,
         * fill the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, when they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Filling RSB on context switch\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled"
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
        { "on",   SPEC_STORE_BYPASS_CMD_ON },   /* Disable Speculative Store Bypass */
        { "off",  SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
};

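/*
 * Parse the "spec_store_bypass_disable=" and "nospec_store_bypass_disable"
 * kernel command line options into a mitigation command.
 */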
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_RDS))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
                /*
                 * AMD platforms by default don't need SSB mitigation.
                 */
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                        break;
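                /* Fall through: on non-AMD CPUs, AUTO behaves like ON. */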
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode != SPEC_STORE_BYPASS_NONE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
                 */
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_RDS;
                        x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
                        x86_spec_ctrl_set(SPEC_CTRL_RDS);
                        break;
                case X86_VENDOR_AMD:
                        x86_amd_rds_enable();
                        break;
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt

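/*
 * Applied when bringing up secondary CPUs so that each AP inherits the
 * SPEC_CTRL and (on AMD) LS_CFG settings chosen for the boot CPU.
 */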
void x86_spec_ctrl_setup_ap(void)
{
        if (ibrs_inuse)
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_rds_enable();
}

#ifdef CONFIG_SYSFS
ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                        char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
                break;

        case X86_BUG_SPECTRE_V1:
                if (osb_is_enabled)
                        return sprintf(buf, "Mitigation: OSB (observable speculation barrier, Intel v6)\n");
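                /*
                 * Note: without OSB this falls through and reports the
                 * Spectre v2 mitigation state for spectre_v1 as well.
                 */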

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_inuse ? ", IBPB (Intel v4)" : "");

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr,
                                   char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif