]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
3e135d88 | 2 | /* |
6b44e72a | 3 | * CPU Microcode Update Driver for Linux |
3e135d88 | 4 | * |
cea58224 | 5 | * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com> |
6b44e72a | 6 | * 2006 Shaohua Li <shaohua.li@intel.com> |
14cfbe55 | 7 | * 2013-2016 Borislav Petkov <bp@alien8.de> |
3e135d88 | 8 | * |
fe055896 BP |
9 | * X86 CPU microcode early update for Linux: |
10 | * | |
11 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | |
12 | * H Peter Anvin <hpa@zytor.com> | |
13 | * (C) 2015 Borislav Petkov <bp@alien8.de> | |
14 | * | |
6b44e72a | 15 | * This driver allows to upgrade microcode on x86 processors. |
3e135d88 | 16 | */ |
f58e1f53 | 17 | |
6b26e1bf | 18 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 19 | |
4bae1967 | 20 | #include <linux/platform_device.h> |
a5321aec | 21 | #include <linux/stop_machine.h> |
fe055896 | 22 | #include <linux/syscore_ops.h> |
4bae1967 | 23 | #include <linux/miscdevice.h> |
871b72dd | 24 | #include <linux/capability.h> |
fe055896 | 25 | #include <linux/firmware.h> |
4bae1967 | 26 | #include <linux/kernel.h> |
a5321aec | 27 | #include <linux/delay.h> |
3e135d88 PO |
28 | #include <linux/mutex.h> |
29 | #include <linux/cpu.h> | |
a5321aec | 30 | #include <linux/nmi.h> |
4bae1967 IM |
31 | #include <linux/fs.h> |
32 | #include <linux/mm.h> | |
3e135d88 | 33 | |
fe055896 | 34 | #include <asm/microcode_intel.h> |
78ff123b | 35 | #include <asm/cpu_device_id.h> |
fe055896 | 36 | #include <asm/microcode_amd.h> |
c93dc84c | 37 | #include <asm/perf_event.h> |
fe055896 BP |
38 | #include <asm/microcode.h> |
39 | #include <asm/processor.h> | |
40 | #include <asm/cmdline.h> | |
06b8534c | 41 | #include <asm/setup.h> |
3e135d88 | 42 | |
14cfbe55 | 43 | #define DRIVER_VERSION "2.2" |
3e135d88 | 44 | |
/* Vendor-specific loader callbacks (Intel or AMD), chosen in microcode_init(). */
static struct microcode_ops	*microcode_ops;

/* Loader is disabled by default; check_loader_disabled_bsp() may clear this. */
static bool dis_ucode_ldr = true;

/* Set once the initrd has been released; see save_microcode_in_initrd(). */
bool initrd_gone;

/* Cache of microcode patches kept around for APs and resume. */
LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

/*
 * Serialize late loading so that CPUs get updated one-by-one.
 */
static DEFINE_RAW_SPINLOCK(update_lock);

struct ucode_cpu_info ucode_cpu_info[NR_CPUS];

/* Carries the collected signature and the error code across an IPI. */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
87 | ||
/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	/* Current AMD patch level is reported in MSR_AMD64_PATCH_LEVEL. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/*
	 * On 32-bit this can run before paging is enabled, so the table
	 * must be reached through its physical address.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	/* Table is zero-terminated; a match means no further updates allowed. */
	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}
113 | ||
fe055896 BP |
/*
 * Decide on the BSP whether the early loader stays disabled.
 *
 * dis_ucode_ldr defaults to true and is cleared here unless "dis_ucode_ldr"
 * is on the kernel command line, we are running under a hypervisor, or the
 * AMD patch level is one of the final (non-updatable) ones.
 */
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	/* Early 32-bit code runs before paging: use physical addresses. */
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	/* Enable the loader unless explicitly disabled on the command line. */
	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
147 | ||
148 | extern struct builtin_fw __start_builtin_fw[]; | |
149 | extern struct builtin_fw __end_builtin_fw[]; | |
150 | ||
151 | bool get_builtin_firmware(struct cpio_data *cd, const char *name) | |
152 | { | |
153 | #ifdef CONFIG_FW_LOADER | |
154 | struct builtin_fw *b_fw; | |
155 | ||
156 | for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { | |
157 | if (!strcmp(name, b_fw->name)) { | |
158 | cd->size = b_fw->size; | |
159 | cd->data = b_fw->data; | |
160 | return true; | |
161 | } | |
162 | } | |
163 | #endif | |
164 | return false; | |
165 | } | |
166 | ||
/* Early microcode load on the boot CPU. */
void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	/* Only Intel family >= 6 and AMD family >= 0x10 are supported. */
	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) < 6)
			return;
		break;

	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) < 0x10)
			return;
		intel = false;
		break;

	default:
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp();
	else
		load_ucode_amd_bsp(cpuid_1_eax);
}
201 | ||
/*
 * Read the loader-disabled flag on an AP. The early 32-bit path still runs
 * before paging, so the flag must be read through its physical address there.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
210 | ||
/* Early microcode load on an application processor. */
void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (check_loader_disabled_ap())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	/* Same vendor/family gate as load_ucode_bsp(). */
	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}
233 | ||
/*
 * Stash microcode found in the initrd into kernel-owned storage before the
 * initrd memory is released. Registered as an fs_initcall (see end of file).
 */
static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ret = -EINVAL;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			ret = save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			ret = save_microcode_in_initrd_amd(cpuid_eax(1));
		break;
	default:
		break;
	}

	/* From here on find_microcode_in_initrd() must not touch the initrd. */
	initrd_gone = true;

	return ret;
}
256 | ||
06b8534c BP |
257 | struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) |
258 | { | |
259 | #ifdef CONFIG_BLK_DEV_INITRD | |
260 | unsigned long start = 0; | |
261 | size_t size; | |
262 | ||
263 | #ifdef CONFIG_X86_32 | |
264 | struct boot_params *params; | |
265 | ||
266 | if (use_pa) | |
267 | params = (struct boot_params *)__pa_nodebug(&boot_params); | |
268 | else | |
269 | params = &boot_params; | |
270 | ||
271 | size = params->hdr.ramdisk_size; | |
272 | ||
273 | /* | |
274 | * Set start only if we have an initrd image. We cannot use initrd_start | |
275 | * because it is not set that early yet. | |
276 | */ | |
277 | if (size) | |
278 | start = params->hdr.ramdisk_image; | |
279 | ||
280 | # else /* CONFIG_X86_64 */ | |
281 | size = (unsigned long)boot_params.ext_ramdisk_size << 32; | |
282 | size |= boot_params.hdr.ramdisk_size; | |
283 | ||
284 | if (size) { | |
285 | start = (unsigned long)boot_params.ext_ramdisk_image << 32; | |
286 | start |= boot_params.hdr.ramdisk_image; | |
287 | ||
288 | start += PAGE_OFFSET; | |
289 | } | |
290 | # endif | |
291 | ||
292 | /* | |
8877ebdd BP |
293 | * Fixup the start address: after reserve_initrd() runs, initrd_start |
294 | * has the virtual address of the beginning of the initrd. It also | |
295 | * possibly relocates the ramdisk. In either case, initrd_start contains | |
296 | * the updated address so use that instead. | |
24c25032 BP |
297 | * |
298 | * initrd_gone is for the hotplug case where we've thrown out initrd | |
299 | * already. | |
06b8534c | 300 | */ |
24c25032 BP |
301 | if (!use_pa) { |
302 | if (initrd_gone) | |
303 | return (struct cpio_data){ NULL, 0, "" }; | |
304 | if (initrd_start) | |
305 | start = initrd_start; | |
a3d98c93 BP |
306 | } else { |
307 | /* | |
308 | * The picture with physical addresses is a bit different: we | |
309 | * need to get the *physical* address to which the ramdisk was | |
310 | * relocated, i.e., relocated_ramdisk (not initrd_start) and | |
311 | * since we're running from physical addresses, we need to access | |
312 | * relocated_ramdisk through its *physical* address too. | |
313 | */ | |
314 | u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); | |
315 | if (*rr) | |
316 | start = *rr; | |
24c25032 | 317 | } |
06b8534c BP |
318 | |
319 | return find_cpio_data(path, (void *)start, size, NULL); | |
320 | #else /* !CONFIG_BLK_DEV_INITRD */ | |
321 | return (struct cpio_data){ NULL, 0, "" }; | |
322 | #endif | |
323 | } | |
324 | ||
fe055896 BP |
325 | void reload_early_microcode(void) |
326 | { | |
327 | int vendor, family; | |
328 | ||
99f925ce BP |
329 | vendor = x86_cpuid_vendor(); |
330 | family = x86_cpuid_family(); | |
fe055896 BP |
331 | |
332 | switch (vendor) { | |
333 | case X86_VENDOR_INTEL: | |
334 | if (family >= 6) | |
335 | reload_ucode_intel(); | |
336 | break; | |
337 | case X86_VENDOR_AMD: | |
338 | if (family >= 0x10) | |
339 | reload_ucode_amd(); | |
340 | break; | |
341 | default: | |
342 | break; | |
343 | } | |
344 | } | |
345 | ||
871b72dd DA |
/* IPI callback: collect the running CPU's microcode signature into ctx. */
static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}

/* Run collect_cpu_info_local() on @cpu; return IPI or collection error. */
static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

/* Refresh ucode_cpu_info[cpu]; the entry is marked valid on success. */
static int collect_cpu_info(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int ret;

	memset(uci, 0, sizeof(*uci));

	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
	if (!ret)
		uci->valid = 1;

	return ret;
}
379 | ||
871b72dd DA |
380 | static void apply_microcode_local(void *arg) |
381 | { | |
854857f5 | 382 | enum ucode_state *err = arg; |
871b72dd | 383 | |
854857f5 | 384 | *err = microcode_ops->apply_microcode(smp_processor_id()); |
871b72dd DA |
385 | } |
386 | ||
387 | static int apply_microcode_on_target(int cpu) | |
388 | { | |
854857f5 | 389 | enum ucode_state err; |
871b72dd DA |
390 | int ret; |
391 | ||
854857f5 BP |
392 | ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); |
393 | if (!ret) { | |
394 | if (err == UCODE_ERROR) | |
395 | ret = 1; | |
396 | } | |
871b72dd DA |
397 | return ret; |
398 | } | |
399 | ||
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
/*
 * Legacy /dev/cpu/microcode path: parse a user-supplied image and apply it
 * to every online CPU whose info has been collected.
 */
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		/* Skip CPUs whose signature was never collected. */
		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_NEW) {
			apply_microcode_on_target(cpu);
		}
	}

	return error;
}

/* Only CAP_SYS_RAWIO may open the device; stream_open: no seeking. */
static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? stream_open(inode, file) : -EPERM;
}

/* write() handler: the whole microcode image arrives in one write. */
static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;
	unsigned long nr_pages = totalram_pages();

	/* Sanity-limit the image size against total RAM. */
	if ((len >> PAGE_SHIFT) > nr_pages) {
		pr_err("too much data (max %ld pages)\n", nr_pages);
		return ret;
	}

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	/* Let perf re-validate features that depend on microcode revision. */
	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}

static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};

static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};

static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
/* Old interface disabled: provide no-op stand-ins. */
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif
491 | ||
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;

/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
#define SPINUNIT 100 /* 100 nsec */
507 | ||
30ec26da AR |
/* Refuse a late load unless every primary thread in the system is online. */
static int check_online_cpus(void)
{
	unsigned int cpu;

	/*
	 * Make sure all CPUs are online. It's fine for SMT to be disabled if
	 * all the primary threads are still online.
	 */
	for_each_present_cpu(cpu) {
		if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
			pr_err("Not all CPUs online, aborting microcode update.\n");
			return -EINVAL;
		}
	}

	return 0;
}
525 | ||
bb8c13d6 BP |
/* Rendezvous counters for the late-load stop_machine() dance. */
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

/*
 * Spin until all online CPUs have incremented @t, or @timeout ns elapse.
 * Returns 1 on timeout, 0 once everybody has arrived.
 */
static int __wait_for_cpus(atomic_t *t, long long timeout)
{
	int all_cpus = num_online_cpus();

	atomic_inc(t);

	while (atomic_read(t) < all_cpus) {
		if (timeout < SPINUNIT) {
			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
				all_cpus - atomic_read(t));
			return 1;
		}

		ndelay(SPINUNIT);
		timeout -= SPINUNIT;

		/* Busy-spinning here; keep the NMI watchdog from firing. */
		touch_nmi_watchdog();
	}
	return 0;
}
a5321aec AR |
549 | |
/*
 * Returns:
 * < 0 - on error
 *   0 - no update done
 *   1 - microcode was updated
 */
static int __reload_late(void *info)
{
	int cpu = smp_processor_id();
	enum ucode_state err;
	int ret = 0;

	/*
	 * Wait for all CPUs to arrive. A load will not be attempted unless all
	 * CPUs show up.
	 * */
	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
		return -1;

	/* One CPU at a time through the actual update. */
	raw_spin_lock(&update_lock);
	apply_microcode_local(&err);
	raw_spin_unlock(&update_lock);

	/* siblings return UCODE_OK because their engine got updated already */
	if (err > UCODE_NFOUND) {
		pr_warn("Error reloading microcode on CPU %d\n", cpu);
		ret = -1;
	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
		ret = 1;
	}

	/*
	 * Increase the wait timeout to a safe value here since we're
	 * serializing the microcode update and that could take a while on a
	 * large number of CPUs. And that is fine as the *actual* timeout will
	 * be determined by the last CPU finished updating and thus cut short.
	 */
	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
		panic("Timeout during microcode update!\n");

	return ret;
}
592 | ||
/*
 * Reload microcode late on all CPUs. Wait for a sec until they
 * all gather together.
 */
static int microcode_reload_late(void)
{
	int ret;

	atomic_set(&late_cpus_in,  0);
	atomic_set(&late_cpus_out, 0);

	/* Run __reload_late() on every online CPU under stop_machine(). */
	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
	if (ret > 0)
		microcode_check();

	pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);

	return ret;
}
612 | ||
8a25a2fd KS |
/*
 * Sysfs "reload" handler: writing "1" triggers a late microcode reload on
 * all online CPUs. Any other value is silently accepted and ignored.
 */
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	enum ucode_state tmp_ret = UCODE_OK;
	int bsp = boot_cpu_data.cpu_index;
	unsigned long val;
	ssize_t ret = 0;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	/* Fetch the firmware on the BSP first; bail out unless it is new. */
	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
	if (tmp_ret != UCODE_NEW)
		return size;

	get_online_cpus();

	ret = check_online_cpus();
	if (ret)
		goto put;

	mutex_lock(&microcode_mutex);
	ret = microcode_reload_late();
	mutex_unlock(&microcode_mutex);

put:
	put_online_cpus();

	if (ret >= 0)
		ret = size;

	return ret;
}
651 | ||
8a25a2fd KS |
652 | static ssize_t version_show(struct device *dev, |
653 | struct device_attribute *attr, char *buf) | |
3e135d88 PO |
654 | { |
655 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | |
656 | ||
d45de409 | 657 | return sprintf(buf, "0x%x\n", uci->cpu_sig.rev); |
3e135d88 PO |
658 | } |
659 | ||
8a25a2fd KS |
660 | static ssize_t pf_show(struct device *dev, |
661 | struct device_attribute *attr, char *buf) | |
3e135d88 PO |
662 | { |
663 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | |
664 | ||
d45de409 | 665 | return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); |
3e135d88 PO |
666 | } |
667 | ||
static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0444, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

/* Per-CPU attributes: /sys/devices/system/cpu/cpuN/microcode/. */
static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};
682 | ||
/* Drop per-CPU driver state, if the vendor ops provide a teardown hook. */
static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

/* Re-apply already-cached microcode on @cpu (resume/online path). */
static enum ucode_state microcode_resume_cpu(int cpu)
{
	if (apply_microcode_on_target(cpu))
		return UCODE_ERROR;

	pr_debug("CPU%d updated upon resume\n", cpu);

	return UCODE_OK;
}
698 | ||
/*
 * Collect CPU info and, once the system is fully running, request and apply
 * microcode for @cpu. @refresh_fw forces re-requesting the firmware image.
 */
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Already initialized: nothing to do. */
	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* --dimm. Trigger a delayed update? */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
	if (ustate == UCODE_NEW) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}
722 | ||
/*
 * Bring @cpu's microcode up to date: resume path when its state is already
 * valid, full initialization otherwise.
 */
static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Refresh CPU microcode revision after resume. */
	collect_cpu_info(cpu);

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}
735 | ||
/* subsys_interface add callback: create sysfs group and init @dev's CPU. */
static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
	int err, cpu = dev->id;

	if (!cpu_online(cpu))
		return 0;

	pr_debug("CPU%d added\n", cpu);

	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
	if (err)
		return err;

	/*
	 * NOTE(review): on UCODE_ERROR the attribute group created above is
	 * left in place — confirm whether the caller cleans it up or whether
	 * it should be removed here.
	 */
	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
		return -EINVAL;

	return err;
}
754 | ||
/* subsys_interface remove callback: tear down per-CPU state and sysfs. */
static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};
773 | ||
/**
 * mc_bp_resume - Update boot CPU microcode during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Re-apply the cached patch; otherwise fall back to early caches. */
	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume			= mc_bp_resume,
};
791 | ||
/* CPU hotplug: update microcode and create sysfs when a CPU comes online. */
static int mc_cpu_online(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	microcode_update_cpu(cpu);
	pr_debug("CPU%d added\n", cpu);

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);
	return 0;
}

/* CPU hotplug: remove the sysfs interface before the CPU goes down. */
static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	/* Suspend is in progress, only remove the interface */
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	pr_debug("CPU%d removed\n", cpu);

	return 0;
}
816 | ||
3d8986bc BP |
/* System-wide "reload" attribute: /sys/devices/system/cpu/microcode/. */
static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};
826 | ||
/*
 * Driver init: select vendor ops, register the firmware platform device,
 * the CPU subsys interface, sysfs groups, syscore ops and hotplug callbacks.
 * Unwinds in reverse order via gotos on failure.
 */
int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	/* Fake parent device for request_firmware() calls. */
	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);

	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);
	return error;

}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);