]>
Commit | Line | Data |
---|---|---|
3e135d88 | 1 | /* |
6b44e72a | 2 | * CPU Microcode Update Driver for Linux |
3e135d88 | 3 | * |
cea58224 | 4 | * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com> |
6b44e72a | 5 | * 2006 Shaohua Li <shaohua.li@intel.com> |
14cfbe55 | 6 | * 2013-2016 Borislav Petkov <bp@alien8.de> |
3e135d88 | 7 | * |
fe055896 BP |
8 | * X86 CPU microcode early update for Linux: |
9 | * | |
10 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | |
11 | * H Peter Anvin" <hpa@zytor.com> | |
12 | * (C) 2015 Borislav Petkov <bp@alien8.de> | |
13 | * | |
6b44e72a | 14 | * This driver allows to upgrade microcode on x86 processors. |
3e135d88 | 15 | * |
6b44e72a BP |
16 | * This program is free software; you can redistribute it and/or |
17 | * modify it under the terms of the GNU General Public License | |
18 | * as published by the Free Software Foundation; either version | |
19 | * 2 of the License, or (at your option) any later version. | |
3e135d88 | 20 | */ |
f58e1f53 | 21 | |
6b26e1bf | 22 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 23 | |
4bae1967 | 24 | #include <linux/platform_device.h> |
41154bc7 | 25 | #include <linux/stop_machine.h> |
fe055896 | 26 | #include <linux/syscore_ops.h> |
4bae1967 | 27 | #include <linux/miscdevice.h> |
871b72dd | 28 | #include <linux/capability.h> |
fe055896 | 29 | #include <linux/firmware.h> |
4bae1967 | 30 | #include <linux/kernel.h> |
41154bc7 | 31 | #include <linux/delay.h> |
3e135d88 PO |
32 | #include <linux/mutex.h> |
33 | #include <linux/cpu.h> | |
41154bc7 | 34 | #include <linux/nmi.h> |
4bae1967 IM |
35 | #include <linux/fs.h> |
36 | #include <linux/mm.h> | |
3e135d88 | 37 | |
fe055896 | 38 | #include <asm/microcode_intel.h> |
78ff123b | 39 | #include <asm/cpu_device_id.h> |
fe055896 | 40 | #include <asm/microcode_amd.h> |
c93dc84c | 41 | #include <asm/perf_event.h> |
fe055896 BP |
42 | #include <asm/microcode.h> |
43 | #include <asm/processor.h> | |
44 | #include <asm/cmdline.h> | |
06b8534c | 45 | #include <asm/setup.h> |
3e135d88 | 46 | |
14cfbe55 | 47 | #define DRIVER_VERSION "2.2" |
3e135d88 | 48 | |
/* Vendor-specific ops (Intel or AMD), selected at init time. */
static struct microcode_ops	*microcode_ops;
/* Loader disabled until check_loader_disabled_bsp() proves otherwise. */
static bool dis_ucode_ldr = true;

/* Set once the initrd has been released; guards late initrd accesses. */
bool initrd_gone;

/* Cache of saved microcode patches, shared with the vendor drivers. */
LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

/*
 * Serialize late loading so that CPUs get updated one-by-one.
 */
static DEFINE_SPINLOCK(update_lock);

/* Per-CPU microcode state, indexed by CPU number. */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

/* Context passed to collect_cpu_info_local() via smp_call_function_single(). */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
91 | ||
/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	/* Read the currently applied AMD patch level from the MSR. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/*
	 * On 32-bit, access the table via its physical address — this runs
	 * early, before paging is set up (NOTE: inferred from the
	 * __pa_nodebug() use; confirm against the boot path).
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	/* final_levels[] is zero-terminated; stop the update on a match. */
	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}
117 | ||
/*
 * Decide on the BSP whether the early loader should be disabled.
 *
 * Runs very early; on 32-bit every global (and the option string) must be
 * accessed through its physical address via __pa_nodebug() since the kernel
 * is not yet running at its virtual addresses.
 *
 * Returns the (possibly updated) value of dis_ucode_ldr.
 */
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	/* Never load past a final patch level on AMD. */
	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	/* Enable the loader unless "dis_ucode_ldr" is on the command line. */
	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
151 | ||
152 | extern struct builtin_fw __start_builtin_fw[]; | |
153 | extern struct builtin_fw __end_builtin_fw[]; | |
154 | ||
155 | bool get_builtin_firmware(struct cpio_data *cd, const char *name) | |
156 | { | |
157 | #ifdef CONFIG_FW_LOADER | |
158 | struct builtin_fw *b_fw; | |
159 | ||
160 | for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { | |
161 | if (!strcmp(name, b_fw->name)) { | |
162 | cd->size = b_fw->size; | |
163 | cd->data = b_fw->data; | |
164 | return true; | |
165 | } | |
166 | } | |
167 | #endif | |
168 | return false; | |
169 | } | |
170 | ||
/*
 * Early microcode load on the boot CPU.
 *
 * Vets the vendor/family first (Intel needs family >= 6, AMD >= 0x10),
 * then honors the loader-disable check before dispatching to the
 * vendor-specific early loader.
 */
void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	/* No CPUID means no way to identify the CPU — bail. */
	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp();
	else
		load_ucode_amd_bsp(cpuid_1_eax);
}
205 | ||
/*
 * AP-side loader-disable check: merely reads the flag the BSP computed.
 * On 32-bit the AP still runs from physical addresses, hence __pa_nodebug().
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
214 | ||
/*
 * Early microcode load on an application processor.
 *
 * Mirrors load_ucode_bsp()'s vendor/family gating but reuses the
 * already-computed loader-disable decision.
 */
void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (check_loader_disabled_ap())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}
237 | ||
4b703305 | 238 | static int __init save_microcode_in_initrd(void) |
fe055896 BP |
239 | { |
240 | struct cpuinfo_x86 *c = &boot_cpu_data; | |
24c25032 | 241 | int ret = -EINVAL; |
fe055896 BP |
242 | |
243 | switch (c->x86_vendor) { | |
244 | case X86_VENDOR_INTEL: | |
245 | if (c->x86 >= 6) | |
24c25032 | 246 | ret = save_microcode_in_initrd_intel(); |
fe055896 BP |
247 | break; |
248 | case X86_VENDOR_AMD: | |
249 | if (c->x86 >= 0x10) | |
1d080f09 | 250 | ret = save_microcode_in_initrd_amd(cpuid_eax(1)); |
fe055896 BP |
251 | break; |
252 | default: | |
253 | break; | |
254 | } | |
255 | ||
24c25032 BP |
256 | initrd_gone = true; |
257 | ||
258 | return ret; | |
fe055896 BP |
259 | } |
260 | ||
/*
 * Locate a microcode file inside the initrd cpio archive.
 *
 * @path:   path of the microcode blob within the cpio archive
 * @use_pa: true when running from physical addresses (early 32-bit boot),
 *          so every global must be dereferenced via __pa_nodebug()
 *
 * Returns a cpio_data with NULL data/0 size when no initrd is available,
 * the initrd is already gone, or CONFIG_BLK_DEV_INITRD is off.
 */
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	/* 64-bit: the ramdisk size/address are split across two fields. */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	} else {
		/*
		 * The picture with physical addresses is a bit different: we
		 * need to get the *physical* address to which the ramdisk was
		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
		 * since we're running from physical addresses, we need to access
		 * relocated_ramdisk through its *physical* address too.
		 */
		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
		if (*rr)
			start = *rr;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
328 | ||
fe055896 BP |
329 | void reload_early_microcode(void) |
330 | { | |
331 | int vendor, family; | |
332 | ||
99f925ce BP |
333 | vendor = x86_cpuid_vendor(); |
334 | family = x86_cpuid_family(); | |
fe055896 BP |
335 | |
336 | switch (vendor) { | |
337 | case X86_VENDOR_INTEL: | |
338 | if (family >= 6) | |
339 | reload_ucode_intel(); | |
340 | break; | |
341 | case X86_VENDOR_AMD: | |
342 | if (family >= 0x10) | |
343 | reload_ucode_amd(); | |
344 | break; | |
345 | default: | |
346 | break; | |
347 | } | |
348 | } | |
349 | ||
/*
 * SMP callback: read this CPU's signature into the caller-provided
 * cpu_info_ctx. Must run on the target CPU (dispatched via
 * smp_call_function_single()).
 */
static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}
357 | ||
358 | static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig) | |
359 | { | |
360 | struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 }; | |
361 | int ret; | |
362 | ||
363 | ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1); | |
364 | if (!ret) | |
365 | ret = ctx.err; | |
366 | ||
367 | return ret; | |
368 | } | |
369 | ||
370 | static int collect_cpu_info(int cpu) | |
371 | { | |
372 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
373 | int ret; | |
374 | ||
375 | memset(uci, 0, sizeof(*uci)); | |
376 | ||
377 | ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig); | |
378 | if (!ret) | |
379 | uci->valid = 1; | |
380 | ||
381 | return ret; | |
382 | } | |
383 | ||
/*
 * SMP callback: apply the cached microcode on the current CPU and report
 * the resulting ucode_state through @arg. Must run on the target CPU.
 */
static void apply_microcode_local(void *arg)
{
	enum ucode_state *err = arg;

	*err = microcode_ops->apply_microcode(smp_processor_id());
}
390 | ||
391 | static int apply_microcode_on_target(int cpu) | |
392 | { | |
ffed5188 | 393 | enum ucode_state err; |
871b72dd DA |
394 | int ret; |
395 | ||
ffed5188 BP |
396 | ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); |
397 | if (!ret) { | |
398 | if (err == UCODE_ERROR) | |
399 | ret = 1; | |
400 | } | |
871b72dd DA |
401 | return ret; |
402 | } | |
403 | ||
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
/*
 * Legacy /dev/cpu/microcode write path: feed the user-supplied image to
 * every online CPU with valid state, applying it where the vendor driver
 * accepts it. Returns 0 on success, -1 on the first UCODE_ERROR.
 */
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		/* Skip CPUs whose signature was never collected. */
		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}
427 | ||
3f10940e | 428 | static int microcode_open(struct inode *inode, struct file *file) |
3e135d88 | 429 | { |
3f10940e | 430 | return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM; |
3e135d88 PO |
431 | } |
432 | ||
/*
 * Legacy device write: accept a microcode image from userspace and update
 * all online CPUs under microcode_mutex with hotplug held off.
 *
 * Returns the number of bytes consumed on success, -EINVAL otherwise.
 */
static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;

	/* Sanity-cap the image size at total RAM. */
	if ((len >> PAGE_SHIFT) > totalram_pages) {
		pr_err("too much data (max %ld pages)\n", totalram_pages);
		return ret;
	}

	/* Hold off CPU hotplug and other updaters for the whole update. */
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	/* Let perf revalidate its microcode-dependent state after an update. */
	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}
457 | ||
/* File operations for the legacy /dev/cpu/microcode interface. */
static const struct file_operations microcode_fops = {
	.owner			= THIS_MODULE,
	.write			= microcode_write,
	.open			= microcode_open,
	.llseek		= no_llseek,
};

/* Misc device node exposed as /dev/cpu/microcode. */
static struct miscdevice microcode_dev = {
	.minor			= MICROCODE_MINOR,
	.name			= "microcode",
	.nodename		= "cpu/microcode",
	.fops			= &microcode_fops,
};
471 | ||
d33dcb9e | 472 | static int __init microcode_dev_init(void) |
3e135d88 PO |
473 | { |
474 | int error; | |
475 | ||
476 | error = misc_register(µcode_dev); | |
477 | if (error) { | |
f58e1f53 | 478 | pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR); |
3e135d88 PO |
479 | return error; |
480 | } | |
481 | ||
482 | return 0; | |
483 | } | |
484 | ||
/* Tear down the legacy misc device. */
static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
/* No-op stubs when the old interface is configured out. */
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif
/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
#define SPINUNIT	100 /* 100 nsec */
1db9027f AR |
510 | static int check_online_cpus(void) |
511 | { | |
512 | if (num_online_cpus() == num_present_cpus()) | |
513 | return 0; | |
514 | ||
515 | pr_err("Not all CPUs online, aborting microcode update.\n"); | |
516 | ||
517 | return -EINVAL; | |
518 | } | |
519 | ||
/* Rendezvous counters for the stop_machine-based late load. */
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

/*
 * Spin until all online CPUs have incremented @t or @timeout (ns) expires.
 * Pokes the NMI watchdog while spinning since this runs with interrupts off
 * inside stop_machine. Returns 0 when everyone arrived, 1 on timeout.
 */
static int __wait_for_cpus(atomic_t *t, long long timeout)
{
	int all_cpus = num_online_cpus();

	atomic_inc(t);

	while (atomic_read(t) < all_cpus) {
		if (timeout < SPINUNIT) {
			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
				all_cpus - atomic_read(t));
			return 1;
		}

		ndelay(SPINUNIT);
		timeout -= SPINUNIT;

		touch_nmi_watchdog();
	}
	return 0;
}
41154bc7 AR |
543 | |
544 | /* | |
545 | * Returns: | |
546 | * < 0 - on error | |
547 | * 0 - no update done | |
548 | * 1 - microcode was updated | |
549 | */ | |
550 | static int __reload_late(void *info) | |
af5c820a | 551 | { |
41154bc7 AR |
552 | int cpu = smp_processor_id(); |
553 | enum ucode_state err; | |
554 | int ret = 0; | |
555 | ||
41154bc7 AR |
556 | /* |
557 | * Wait for all CPUs to arrive. A load will not be attempted unless all | |
558 | * CPUs show up. | |
559 | * */ | |
d6c97b05 BP |
560 | if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) |
561 | return -1; | |
41154bc7 AR |
562 | |
563 | spin_lock(&update_lock); | |
564 | apply_microcode_local(&err); | |
565 | spin_unlock(&update_lock); | |
566 | ||
567 | if (err > UCODE_NFOUND) { | |
568 | pr_warn("Error reloading microcode on CPU %d\n", cpu); | |
d6c97b05 BP |
569 | return -1; |
570 | /* siblings return UCODE_OK because their engine got updated already */ | |
571 | } else if (err == UCODE_UPDATED || err == UCODE_OK) { | |
41154bc7 | 572 | ret = 1; |
d6c97b05 BP |
573 | } else { |
574 | return ret; | |
41154bc7 | 575 | } |
af5c820a | 576 | |
d6c97b05 BP |
577 | /* |
578 | * Increase the wait timeout to a safe value here since we're | |
579 | * serializing the microcode update and that could take a while on a | |
580 | * large number of CPUs. And that is fine as the *actual* timeout will | |
581 | * be determined by the last CPU finished updating and thus cut short. | |
582 | */ | |
583 | if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus())) | |
584 | panic("Timeout during microcode update!\n"); | |
41154bc7 AR |
585 | |
586 | return ret; | |
587 | } | |
/*
 * Reload microcode late on all CPUs. Wait for a sec until they
 * all gather together.
 *
 * Returns stop_machine's result: < 0 error, 0 no update, > 0 updated.
 */
static int microcode_reload_late(void)
{
	int ret;

	/* Reset the rendezvous counters for this attempt. */
	atomic_set(&late_cpus_in,  0);
	atomic_set(&late_cpus_out, 0);

	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
	/* On an actual update, re-examine microcode-dependent CPU features. */
	if (ret > 0)
		microcode_check();

	return ret;
}
606 | ||
/*
 * sysfs "reload" handler: writing "1" triggers a late microcode reload.
 *
 * Fetches the firmware on the BSP first; only a genuinely new image
 * (UCODE_NEW) proceeds to the stop_machine update, and only with every
 * present CPU online. Returns @size on success/no-op, negative errno on
 * failure.
 */
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	enum ucode_state tmp_ret = UCODE_OK;
	int bsp = boot_cpu_data.cpu_index;
	unsigned long val;
	ssize_t ret = 0;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	/* Only the value 1 triggers a reload; anything else is a no-op. */
	if (val != 1)
		return size;

	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
	if (tmp_ret != UCODE_NEW)
		return size;

	get_online_cpus();

	ret = check_online_cpus();
	if (ret)
		goto put;

	mutex_lock(&microcode_mutex);
	ret = microcode_reload_late();
	mutex_unlock(&microcode_mutex);

put:
	put_online_cpus();

	if (ret >= 0)
		ret = size;

	return ret;
}
645 | ||
8a25a2fd KS |
646 | static ssize_t version_show(struct device *dev, |
647 | struct device_attribute *attr, char *buf) | |
3e135d88 PO |
648 | { |
649 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | |
650 | ||
d45de409 | 651 | return sprintf(buf, "0x%x\n", uci->cpu_sig.rev); |
3e135d88 PO |
652 | } |
653 | ||
8a25a2fd KS |
654 | static ssize_t pf_show(struct device *dev, |
655 | struct device_attribute *attr, char *buf) | |
3e135d88 PO |
656 | { |
657 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | |
658 | ||
d45de409 | 659 | return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); |
3e135d88 PO |
660 | } |
661 | ||
/* Per-CPU sysfs attributes under .../cpuN/microcode/. */
static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};
676 | ||
/* Release per-CPU microcode state via the vendor hook, if one exists. */
static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}
682 | ||
871b72dd | 683 | static enum ucode_state microcode_resume_cpu(int cpu) |
d45de409 | 684 | { |
bb9d3e47 BP |
685 | if (apply_microcode_on_target(cpu)) |
686 | return UCODE_ERROR; | |
871b72dd | 687 | |
6b14b818 BP |
688 | pr_debug("CPU%d updated upon resume\n", cpu); |
689 | ||
871b72dd | 690 | return UCODE_OK; |
d45de409 DA |
691 | } |
692 | ||
/*
 * Initialize microcode state for @cpu: collect its signature and, when the
 * system is fully up, request and apply a firmware image.
 *
 * @refresh_fw: passed through to the vendor's request_microcode_fw()
 *              (forces a re-read of the firmware blob when true).
 *
 * Returns UCODE_OK if already valid, UCODE_ERROR on signature-collection
 * failure, UCODE_NFOUND before SYSTEM_RUNNING, otherwise the vendor result.
 */
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
	if (ustate == UCODE_NEW) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}
716 | ||
/*
 * Bring @cpu's microcode up to date on (re-)online: resume path when its
 * state is already valid, full init path otherwise.
 */
static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Refresh CPU microcode revision after resume. */
	collect_cpu_info(cpu);

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}
729 | ||
8a25a2fd | 730 | static int mc_device_add(struct device *dev, struct subsys_interface *sif) |
3e135d88 | 731 | { |
8a25a2fd | 732 | int err, cpu = dev->id; |
3e135d88 PO |
733 | |
734 | if (!cpu_online(cpu)) | |
735 | return 0; | |
736 | ||
f58e1f53 | 737 | pr_debug("CPU%d added\n", cpu); |
3e135d88 | 738 | |
8a25a2fd | 739 | err = sysfs_create_group(&dev->kobj, &mc_attr_group); |
3e135d88 PO |
740 | if (err) |
741 | return err; | |
742 | ||
48e30685 | 743 | if (microcode_init_cpu(cpu, true) == UCODE_ERROR) |
6c53cbfc | 744 | return -EINVAL; |
af5c820a RR |
745 | |
746 | return err; | |
3e135d88 PO |
747 | } |
748 | ||
/*
 * subsys_interface remove hook: drop the CPU's microcode state and its
 * sysfs group. No-op for offline CPUs.
 */
static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}
760 | ||
/* Hook per-CPU add/remove into the cpu subsystem. */
static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};
/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Re-apply the cached image, or fall back to the early-load path. */
	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

/* Re-apply microcode on the boot CPU when resuming from suspend. */
static struct syscore_ops mc_syscore_ops = {
	.resume			= mc_bp_resume,
};
785 | ||
29bd7fbc | 786 | static int mc_cpu_online(unsigned int cpu) |
3e135d88 | 787 | { |
8a25a2fd | 788 | struct device *dev; |
3e135d88 | 789 | |
8a25a2fd | 790 | dev = get_cpu_device(cpu); |
29bd7fbc SAS |
791 | microcode_update_cpu(cpu); |
792 | pr_debug("CPU%d added\n", cpu); | |
09c3f0d8 | 793 | |
29bd7fbc SAS |
794 | if (sysfs_create_group(&dev->kobj, &mc_attr_group)) |
795 | pr_err("Failed to create group for CPU%d\n", cpu); | |
796 | return 0; | |
797 | } | |
09c3f0d8 | 798 | |
29bd7fbc SAS |
799 | static int mc_cpu_down_prep(unsigned int cpu) |
800 | { | |
801 | struct device *dev; | |
70989449 | 802 | |
29bd7fbc SAS |
803 | dev = get_cpu_device(cpu); |
804 | /* Suspend is in progress, only remove the interface */ | |
805 | sysfs_remove_group(&dev->kobj, &mc_attr_group); | |
806 | pr_debug("CPU%d removed\n", cpu); | |
06b8534c | 807 | |
29bd7fbc | 808 | return 0; |
3e135d88 PO |
809 | } |
810 | ||
/* System-wide "reload" attribute under /sys/devices/system/cpu/microcode/. */
static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name			= "microcode",
	.attrs			= cpu_root_microcode_attrs,
};
820 | ||
/*
 * Driver entry point (late_initcall): pick the vendor ops, register the
 * fake platform device, the per-CPU subsys interface, the sysfs groups,
 * the legacy device node, syscore resume hook and hotplug callbacks.
 *
 * Returns 0 on success or a negative errno; errors unwind everything
 * registered so far via the out_* labels, in reverse order.
 */
int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	/* Fake device so the vendor drivers can call request_firmware(). */
	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);

	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;

}
/* Save initrd microcode before the initrd is freed; init the driver late. */
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);