x86/bugs/intel: Set proper CPU features and setup RDS
arch/x86/kernel/cpu/bugs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *   - Rafael R. Reilova (moved everything from head.S),
 *     <rreilova@ececs.uc.edu>
 *   - Channing Corn (tests & fixes),
 *   - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
static u64 __ro_after_init x86_spec_ctrl_base;

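/*
 * Entry point, called from start_kernel(). Identifies the boot CPU and
 * selects the Spectre v2 and Speculative Store Bypass mitigations before
 * alternative_instructions() patches in the chosen code sequences.
 */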
void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                  = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]     = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]     = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]         = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

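/*
 * Write a mitigation value into MSR_IA32_SPEC_CTRL. Only the IBRS and RDS
 * bits are accepted; anything else triggers a one-time warning and the
 * write is skipped. The reserved bits captured at boot in
 * x86_spec_ctrl_base are always OR'ed back into the written value.
 */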
void x86_spec_ctrl_set(u64 val)
{
        if (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_RDS))
                WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
        else
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
        return x86_spec_ctrl_base;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

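/*
 * Switch MSR_IA32_SPEC_CTRL to the guest value on VM entry, skipping the
 * expensive MSR write when the guest value matches the host base value.
 * Exported for hypervisor modules (KVM being the expected user), roughly:
 *
 *      x86_spec_ctrl_set_guest(vcpu_spec_ctrl);
 *      ... run the guest ...
 *      x86_spec_ctrl_restore_host(vcpu_spec_ctrl);
 *
 * where vcpu_spec_ctrl stands in for a per-vCPU copy of the guest's
 * SPEC_CTRL value (the name here is illustrative, not an API).
 */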
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
        if (!boot_cpu_has(X86_FEATURE_IBRS))
                return;
        if (x86_spec_ctrl_base != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

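/*
 * Counterpart of x86_spec_ctrl_set_guest(): restore the host value of
 * MSR_IA32_SPEC_CTRL on VM exit, again only when the guest had actually
 * diverged from the host base value.
 */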
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
        if (!boot_cpu_has(X86_FEATURE_IBRS))
                return;
        if (x86_spec_ctrl_base != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

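/*
 * Module loading hook: a kernel built with retpolines flags any module
 * that was built without them, since its indirect branches would remain
 * exploitable. The result feeds into the sysfs vulnerability report via
 * spectre_v2_module_string().
 */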
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

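/*
 * Map the spectre_v2= command line option to a mitigation command.
 * "nospectre_v2" takes precedence over any spectre_v2= value, and
 * selections that cannot be honoured (retpoline not compiled in, or
 * retpoline,amd on a non-AMD CPU) fall back to AUTO.
 */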
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

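/*
 * Pick the Spectre v2 mitigation based on the command line selection and
 * what the CPU and the kernel build support. Beyond the basic retpoline
 * choice this also decides whether RSB filling on context switch, IBPB
 * and IBRS-for-firmware-calls are enabled.
 */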
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor PTI are available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, when they will predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
        }

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. But firmware isn't, so use IBRS to protect that.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]    = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
        { "on",   SPEC_STORE_BYPASS_CMD_ON },   /* Disable Speculative Store Bypass */
        { "off",  SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
};

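/*
 * Map the spec_store_bypass_disable= command line option to a mitigation
 * command. "nospec_store_bypass_disable" forces NONE; unknown values fall
 * back to AUTO.
 */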
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable"))
                return SPEC_STORE_BYPASS_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                  arg, sizeof(arg));
        if (ret < 0)
                return SPEC_STORE_BYPASS_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                        continue;

                cmd = ssb_mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPEC_STORE_BYPASS_CMD_AUTO;
        }

        return cmd;
}

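/*
 * Resolve the command line selection against what the CPU supports and
 * return the resulting mitigation mode. The SPEC_CTRL MSR programming for
 * the boot CPU happens here as well.
 */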
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_RDS))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode != SPEC_STORE_BYPASS_NONE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
                 */
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_RDS;
                        x86_spec_ctrl_set(SPEC_CTRL_RDS);
                        break;
                case X86_VENDOR_AMD:
                        break;
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt

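/*
 * Propagate the boot CPU's SPEC_CTRL setup to a freshly brought up
 * secondary CPU, restricted to the bits this file manages.
 */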
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_IBRS))
                x86_spec_ctrl_set(x86_spec_ctrl_base & (SPEC_CTRL_IBRS | SPEC_CTRL_RDS));
}

#ifdef CONFIG_SYSFS

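/*
 * Common backend for the /sys/devices/system/cpu/vulnerabilities/* files.
 * Reports "Not affected" when the CPU doesn't have the bug, otherwise the
 * state of the selected mitigation, e.g.
 * "Mitigation: Full generic retpoline, IBPB" for spectre_v2.
 */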
ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                        char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif