2 * CPU Microcode Update Driver for Linux
4 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 * 2013-2016 Borislav Petkov <bp@alien8.de>
8 * X86 CPU microcode early update for Linux:
10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
11 * H Peter Anvin <hpa@zytor.com>
12 * (C) 2015 Borislav Petkov <bp@alien8.de>
14 * This driver allows to upgrade microcode on x86 processors.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
22 #define pr_fmt(fmt) "microcode: " fmt
24 #include <linux/platform_device.h>
25 #include <linux/stop_machine.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/miscdevice.h>
28 #include <linux/capability.h>
29 #include <linux/firmware.h>
30 #include <linux/kernel.h>
31 #include <linux/delay.h>
32 #include <linux/mutex.h>
33 #include <linux/cpu.h>
34 #include <linux/nmi.h>
38 #include <asm/microcode_intel.h>
39 #include <asm/cpu_device_id.h>
40 #include <asm/microcode_amd.h>
41 #include <asm/perf_event.h>
42 #include <asm/microcode.h>
43 #include <asm/processor.h>
44 #include <asm/cmdline.h>
45 #include <asm/setup.h>
47 #define DRIVER_VERSION "2.2"
49 static struct microcode_ops
*microcode_ops
;
50 static bool dis_ucode_ldr
= true;
54 LIST_HEAD(microcode_cache
);
59 * All non cpu-hotplug-callback call sites use:
61 * - microcode_mutex to synchronize with each other;
62 * - get/put_online_cpus() to synchronize with
63 * the cpu-hotplug-callback call sites.
65 * We guarantee that only a single cpu is being
66 * updated at any particular moment of time.
68 static DEFINE_MUTEX(microcode_mutex
);
71 * Serialize late loading so that CPUs get updated one-by-one.
73 static DEFINE_SPINLOCK(update_lock
);
75 struct ucode_cpu_info ucode_cpu_info
[NR_CPUS
];
78 struct cpu_signature
*cpu_sig
;
83 * Those patch levels cannot be updated to newer ones and thus should be final.
85 static u32 final_levels
[] = {
89 0, /* T-101 terminator */
93 * Check the current patch level on this CPU.
96 * - true: if update should stop
99 static bool amd_check_current_patch_level(void)
104 native_rdmsr(MSR_AMD64_PATCH_LEVEL
, lvl
, dummy
);
106 if (IS_ENABLED(CONFIG_X86_32
))
107 levels
= (u32
*)__pa_nodebug(&final_levels
);
109 levels
= final_levels
;
111 for (i
= 0; levels
[i
]; i
++) {
112 if (lvl
== levels
[i
])
118 static bool __init
check_loader_disabled_bsp(void)
120 static const char *__dis_opt_str
= "dis_ucode_ldr";
123 const char *cmdline
= (const char *)__pa_nodebug(boot_command_line
);
124 const char *option
= (const char *)__pa_nodebug(__dis_opt_str
);
125 bool *res
= (bool *)__pa_nodebug(&dis_ucode_ldr
);
127 #else /* CONFIG_X86_64 */
128 const char *cmdline
= boot_command_line
;
129 const char *option
= __dis_opt_str
;
130 bool *res
= &dis_ucode_ldr
;
134 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
135 * completely accurate as xen pv guests don't see that CPUID bit set but
136 * that's good enough as they don't land on the BSP path anyway.
138 if (native_cpuid_ecx(1) & BIT(31))
141 if (x86_cpuid_vendor() == X86_VENDOR_AMD
) {
142 if (amd_check_current_patch_level())
146 if (cmdline_find_option_bool(cmdline
, option
) <= 0)
152 extern struct builtin_fw __start_builtin_fw
[];
153 extern struct builtin_fw __end_builtin_fw
[];
155 bool get_builtin_firmware(struct cpio_data
*cd
, const char *name
)
157 #ifdef CONFIG_FW_LOADER
158 struct builtin_fw
*b_fw
;
160 for (b_fw
= __start_builtin_fw
; b_fw
!= __end_builtin_fw
; b_fw
++) {
161 if (!strcmp(name
, b_fw
->name
)) {
162 cd
->size
= b_fw
->size
;
163 cd
->data
= b_fw
->data
;
171 void __init
load_ucode_bsp(void)
173 unsigned int cpuid_1_eax
;
179 cpuid_1_eax
= native_cpuid_eax(1);
181 switch (x86_cpuid_vendor()) {
182 case X86_VENDOR_INTEL
:
183 if (x86_family(cpuid_1_eax
) < 6)
188 if (x86_family(cpuid_1_eax
) < 0x10)
197 if (check_loader_disabled_bsp())
201 load_ucode_intel_bsp();
203 load_ucode_amd_bsp(cpuid_1_eax
);
206 static bool check_loader_disabled_ap(void)
209 return *((bool *)__pa_nodebug(&dis_ucode_ldr
));
211 return dis_ucode_ldr
;
215 void load_ucode_ap(void)
217 unsigned int cpuid_1_eax
;
219 if (check_loader_disabled_ap())
222 cpuid_1_eax
= native_cpuid_eax(1);
224 switch (x86_cpuid_vendor()) {
225 case X86_VENDOR_INTEL
:
226 if (x86_family(cpuid_1_eax
) >= 6)
227 load_ucode_intel_ap();
230 if (x86_family(cpuid_1_eax
) >= 0x10)
231 load_ucode_amd_ap(cpuid_1_eax
);
238 static int __init
save_microcode_in_initrd(void)
240 struct cpuinfo_x86
*c
= &boot_cpu_data
;
243 switch (c
->x86_vendor
) {
244 case X86_VENDOR_INTEL
:
246 ret
= save_microcode_in_initrd_intel();
250 ret
= save_microcode_in_initrd_amd(cpuid_eax(1));
261 struct cpio_data
find_microcode_in_initrd(const char *path
, bool use_pa
)
263 #ifdef CONFIG_BLK_DEV_INITRD
264 unsigned long start
= 0;
268 struct boot_params
*params
;
271 params
= (struct boot_params
*)__pa_nodebug(&boot_params
);
273 params
= &boot_params
;
275 size
= params
->hdr
.ramdisk_size
;
278 * Set start only if we have an initrd image. We cannot use initrd_start
279 * because it is not set that early yet.
282 start
= params
->hdr
.ramdisk_image
;
284 # else /* CONFIG_X86_64 */
285 size
= (unsigned long)boot_params
.ext_ramdisk_size
<< 32;
286 size
|= boot_params
.hdr
.ramdisk_size
;
289 start
= (unsigned long)boot_params
.ext_ramdisk_image
<< 32;
290 start
|= boot_params
.hdr
.ramdisk_image
;
292 start
+= PAGE_OFFSET
;
297 * Fixup the start address: after reserve_initrd() runs, initrd_start
298 * has the virtual address of the beginning of the initrd. It also
299 * possibly relocates the ramdisk. In either case, initrd_start contains
300 * the updated address so use that instead.
302 * initrd_gone is for the hotplug case where we've thrown out initrd
307 return (struct cpio_data
){ NULL
, 0, "" };
309 start
= initrd_start
;
312 * The picture with physical addresses is a bit different: we
313 * need to get the *physical* address to which the ramdisk was
314 * relocated, i.e., relocated_ramdisk (not initrd_start) and
315 * since we're running from physical addresses, we need to access
316 * relocated_ramdisk through its *physical* address too.
318 u64
*rr
= (u64
*)__pa_nodebug(&relocated_ramdisk
);
323 return find_cpio_data(path
, (void *)start
, size
, NULL
);
324 #else /* !CONFIG_BLK_DEV_INITRD */
325 return (struct cpio_data
){ NULL
, 0, "" };
329 void reload_early_microcode(void)
333 vendor
= x86_cpuid_vendor();
334 family
= x86_cpuid_family();
337 case X86_VENDOR_INTEL
:
339 reload_ucode_intel();
350 static void collect_cpu_info_local(void *arg
)
352 struct cpu_info_ctx
*ctx
= arg
;
354 ctx
->err
= microcode_ops
->collect_cpu_info(smp_processor_id(),
358 static int collect_cpu_info_on_target(int cpu
, struct cpu_signature
*cpu_sig
)
360 struct cpu_info_ctx ctx
= { .cpu_sig
= cpu_sig
, .err
= 0 };
363 ret
= smp_call_function_single(cpu
, collect_cpu_info_local
, &ctx
, 1);
370 static int collect_cpu_info(int cpu
)
372 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
375 memset(uci
, 0, sizeof(*uci
));
377 ret
= collect_cpu_info_on_target(cpu
, &uci
->cpu_sig
);
384 static void apply_microcode_local(void *arg
)
386 enum ucode_state
*err
= arg
;
388 *err
= microcode_ops
->apply_microcode(smp_processor_id());
391 static int apply_microcode_on_target(int cpu
)
393 enum ucode_state err
;
396 ret
= smp_call_function_single(cpu
, apply_microcode_local
, &err
, 1);
398 if (err
== UCODE_ERROR
)
404 #ifdef CONFIG_MICROCODE_OLD_INTERFACE
405 static int do_microcode_update(const void __user
*buf
, size_t size
)
410 for_each_online_cpu(cpu
) {
411 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
412 enum ucode_state ustate
;
417 ustate
= microcode_ops
->request_microcode_user(cpu
, buf
, size
);
418 if (ustate
== UCODE_ERROR
) {
421 } else if (ustate
== UCODE_OK
)
422 apply_microcode_on_target(cpu
);
428 static int microcode_open(struct inode
*inode
, struct file
*file
)
430 return capable(CAP_SYS_RAWIO
) ? nonseekable_open(inode
, file
) : -EPERM
;
433 static ssize_t
microcode_write(struct file
*file
, const char __user
*buf
,
434 size_t len
, loff_t
*ppos
)
436 ssize_t ret
= -EINVAL
;
438 if ((len
>> PAGE_SHIFT
) > totalram_pages
) {
439 pr_err("too much data (max %ld pages)\n", totalram_pages
);
444 mutex_lock(µcode_mutex
);
446 if (do_microcode_update(buf
, len
) == 0)
450 perf_check_microcode();
452 mutex_unlock(µcode_mutex
);
458 static const struct file_operations microcode_fops
= {
459 .owner
= THIS_MODULE
,
460 .write
= microcode_write
,
461 .open
= microcode_open
,
465 static struct miscdevice microcode_dev
= {
466 .minor
= MICROCODE_MINOR
,
468 .nodename
= "cpu/microcode",
469 .fops
= µcode_fops
,
472 static int __init
microcode_dev_init(void)
476 error
= misc_register(µcode_dev
);
478 pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR
);
485 static void __exit
microcode_dev_exit(void)
487 misc_deregister(µcode_dev
);
490 #define microcode_dev_init() 0
491 #define microcode_dev_exit() do { } while (0)
494 /* fake device for request_firmware */
495 static struct platform_device
*microcode_pdev
;
498 * Late loading dance. Why the heavy-handed stomp_machine effort?
500 * - HT siblings must be idle and not execute other code while the other sibling
501 * is loading microcode in order to avoid any negative interactions caused by
504 * - In addition, microcode update on the cores must be serialized until this
505 * requirement can be relaxed in the future. Right now, this is conservative
508 #define SPINUNIT 100 /* 100 nsec */
510 static int check_online_cpus(void)
512 if (num_online_cpus() == num_present_cpus())
515 pr_err("Not all CPUs online, aborting microcode update.\n");
520 static atomic_t late_cpus_in
;
521 static atomic_t late_cpus_out
;
523 static int __wait_for_cpus(atomic_t
*t
, long long timeout
)
525 int all_cpus
= num_online_cpus();
529 while (atomic_read(t
) < all_cpus
) {
530 if (timeout
< SPINUNIT
) {
531 pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
532 all_cpus
- atomic_read(t
));
539 touch_nmi_watchdog();
548 * 1 - microcode was updated
550 static int __reload_late(void *info
)
552 int cpu
= smp_processor_id();
553 enum ucode_state err
;
557 * Wait for all CPUs to arrive. A load will not be attempted unless all
560 if (__wait_for_cpus(&late_cpus_in
, NSEC_PER_SEC
))
563 spin_lock(&update_lock
);
564 apply_microcode_local(&err
);
565 spin_unlock(&update_lock
);
567 if (err
> UCODE_NFOUND
) {
568 pr_warn("Error reloading microcode on CPU %d\n", cpu
);
570 /* siblings return UCODE_OK because their engine got updated already */
571 } else if (err
== UCODE_UPDATED
|| err
== UCODE_OK
) {
578 * Increase the wait timeout to a safe value here since we're
579 * serializing the microcode update and that could take a while on a
580 * large number of CPUs. And that is fine as the *actual* timeout will
581 * be determined by the last CPU finished updating and thus cut short.
583 if (__wait_for_cpus(&late_cpus_out
, NSEC_PER_SEC
* num_online_cpus()))
584 panic("Timeout during microcode update!\n");
590 * Reload microcode late on all CPUs. Wait for a sec until they
591 * all gather together.
593 static int microcode_reload_late(void)
597 atomic_set(&late_cpus_in
, 0);
598 atomic_set(&late_cpus_out
, 0);
600 ret
= stop_machine_cpuslocked(__reload_late
, NULL
, cpu_online_mask
);
607 static ssize_t
reload_store(struct device
*dev
,
608 struct device_attribute
*attr
,
609 const char *buf
, size_t size
)
611 enum ucode_state tmp_ret
= UCODE_OK
;
612 int bsp
= boot_cpu_data
.cpu_index
;
616 ret
= kstrtoul(buf
, 0, &val
);
623 tmp_ret
= microcode_ops
->request_microcode_fw(bsp
, µcode_pdev
->dev
, true);
624 if (tmp_ret
!= UCODE_NEW
)
629 ret
= check_online_cpus();
633 mutex_lock(µcode_mutex
);
634 ret
= microcode_reload_late();
635 mutex_unlock(µcode_mutex
);
646 static ssize_t
version_show(struct device
*dev
,
647 struct device_attribute
*attr
, char *buf
)
649 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ dev
->id
;
651 return sprintf(buf
, "0x%x\n", uci
->cpu_sig
.rev
);
654 static ssize_t
pf_show(struct device
*dev
,
655 struct device_attribute
*attr
, char *buf
)
657 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ dev
->id
;
659 return sprintf(buf
, "0x%x\n", uci
->cpu_sig
.pf
);
662 static DEVICE_ATTR_WO(reload
);
663 static DEVICE_ATTR(version
, 0400, version_show
, NULL
);
664 static DEVICE_ATTR(processor_flags
, 0400, pf_show
, NULL
);
666 static struct attribute
*mc_default_attrs
[] = {
667 &dev_attr_version
.attr
,
668 &dev_attr_processor_flags
.attr
,
672 static const struct attribute_group mc_attr_group
= {
673 .attrs
= mc_default_attrs
,
677 static void microcode_fini_cpu(int cpu
)
679 if (microcode_ops
->microcode_fini_cpu
)
680 microcode_ops
->microcode_fini_cpu(cpu
);
683 static enum ucode_state
microcode_resume_cpu(int cpu
)
685 if (apply_microcode_on_target(cpu
))
688 pr_debug("CPU%d updated upon resume\n", cpu
);
693 static enum ucode_state
microcode_init_cpu(int cpu
, bool refresh_fw
)
695 enum ucode_state ustate
;
696 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
701 if (collect_cpu_info(cpu
))
704 /* --dimm. Trigger a delayed update? */
705 if (system_state
!= SYSTEM_RUNNING
)
708 ustate
= microcode_ops
->request_microcode_fw(cpu
, µcode_pdev
->dev
, refresh_fw
);
709 if (ustate
== UCODE_NEW
) {
710 pr_debug("CPU%d updated upon init\n", cpu
);
711 apply_microcode_on_target(cpu
);
717 static enum ucode_state
microcode_update_cpu(int cpu
)
719 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
721 /* Refresh CPU microcode revision after resume. */
722 collect_cpu_info(cpu
);
725 return microcode_resume_cpu(cpu
);
727 return microcode_init_cpu(cpu
, false);
730 static int mc_device_add(struct device
*dev
, struct subsys_interface
*sif
)
732 int err
, cpu
= dev
->id
;
734 if (!cpu_online(cpu
))
737 pr_debug("CPU%d added\n", cpu
);
739 err
= sysfs_create_group(&dev
->kobj
, &mc_attr_group
);
743 if (microcode_init_cpu(cpu
, true) == UCODE_ERROR
)
749 static void mc_device_remove(struct device
*dev
, struct subsys_interface
*sif
)
753 if (!cpu_online(cpu
))
756 pr_debug("CPU%d removed\n", cpu
);
757 microcode_fini_cpu(cpu
);
758 sysfs_remove_group(&dev
->kobj
, &mc_attr_group
);
761 static struct subsys_interface mc_cpu_interface
= {
763 .subsys
= &cpu_subsys
,
764 .add_dev
= mc_device_add
,
765 .remove_dev
= mc_device_remove
,
769 * mc_bp_resume - Update boot CPU microcode during resume.
771 static void mc_bp_resume(void)
773 int cpu
= smp_processor_id();
774 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
776 if (uci
->valid
&& uci
->mc
)
777 microcode_ops
->apply_microcode(cpu
);
779 reload_early_microcode();
782 static struct syscore_ops mc_syscore_ops
= {
783 .resume
= mc_bp_resume
,
786 static int mc_cpu_online(unsigned int cpu
)
790 dev
= get_cpu_device(cpu
);
791 microcode_update_cpu(cpu
);
792 pr_debug("CPU%d added\n", cpu
);
794 if (sysfs_create_group(&dev
->kobj
, &mc_attr_group
))
795 pr_err("Failed to create group for CPU%d\n", cpu
);
799 static int mc_cpu_down_prep(unsigned int cpu
)
803 dev
= get_cpu_device(cpu
);
804 /* Suspend is in progress, only remove the interface */
805 sysfs_remove_group(&dev
->kobj
, &mc_attr_group
);
806 pr_debug("CPU%d removed\n", cpu
);
811 static struct attribute
*cpu_root_microcode_attrs
[] = {
812 &dev_attr_reload
.attr
,
816 static const struct attribute_group cpu_root_microcode_group
= {
818 .attrs
= cpu_root_microcode_attrs
,
821 int __init
microcode_init(void)
823 struct cpuinfo_x86
*c
= &boot_cpu_data
;
829 if (c
->x86_vendor
== X86_VENDOR_INTEL
)
830 microcode_ops
= init_intel_microcode();
831 else if (c
->x86_vendor
== X86_VENDOR_AMD
)
832 microcode_ops
= init_amd_microcode();
834 pr_err("no support for this CPU vendor\n");
839 microcode_pdev
= platform_device_register_simple("microcode", -1,
841 if (IS_ERR(microcode_pdev
))
842 return PTR_ERR(microcode_pdev
);
845 mutex_lock(µcode_mutex
);
847 error
= subsys_interface_register(&mc_cpu_interface
);
849 perf_check_microcode();
850 mutex_unlock(µcode_mutex
);
856 error
= sysfs_create_group(&cpu_subsys
.dev_root
->kobj
,
857 &cpu_root_microcode_group
);
860 pr_err("Error creating microcode group!\n");
864 error
= microcode_dev_init();
866 goto out_ucode_group
;
868 register_syscore_ops(&mc_syscore_ops
);
869 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN
, "x86/microcode:online",
870 mc_cpu_online
, mc_cpu_down_prep
);
872 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION
);
877 sysfs_remove_group(&cpu_subsys
.dev_root
->kobj
,
878 &cpu_root_microcode_group
);
882 mutex_lock(µcode_mutex
);
884 subsys_interface_unregister(&mc_cpu_interface
);
886 mutex_unlock(µcode_mutex
);
890 platform_device_unregister(microcode_pdev
);
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);