2 * Intel CPU Microcode Update Driver for Linux
4 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
7 * Intel CPU microcode early update for Linux
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
10 * H Peter Anvin <hpa@zytor.com>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
24 #define pr_fmt(fmt) "microcode: " fmt
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
36 #include <asm/microcode_intel.h>
37 #include <asm/intel-family.h>
38 #include <asm/processor.h>
39 #include <asm/tlbflush.h>
40 #include <asm/setup.h>
/* Path of the microcode blob inside the builtin-firmware/initrd cpio. */
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;
/*
 * Check whether a CPU signature/platform-flags pair accepts a microcode
 * signature/platform-flags pair.
 *
 * @s1/@p1: CPU signature and platform flags
 * @s2/@p2: microcode header (or extended signature) sig and pf
 *
 * Returns true when the signatures are equal and the platform flags are
 * either both zero or share at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Processor flags are either both 0 ... */
	if (!p1 && !p2)
		return true;

	/* ... or they intersect. */
	return p1 & p2;
}
63 * Returns 1 if update has been found, 0 otherwise.
65 static int find_matching_signature(void *mc
, unsigned int csig
, int cpf
)
67 struct microcode_header_intel
*mc_hdr
= mc
;
68 struct extended_sigtable
*ext_hdr
;
69 struct extended_signature
*ext_sig
;
72 if (cpu_signatures_match(csig
, cpf
, mc_hdr
->sig
, mc_hdr
->pf
))
75 /* Look for ext. headers: */
76 if (get_totalsize(mc_hdr
) <= get_datasize(mc_hdr
) + MC_HEADER_SIZE
)
79 ext_hdr
= mc
+ get_datasize(mc_hdr
) + MC_HEADER_SIZE
;
80 ext_sig
= (void *)ext_hdr
+ EXT_HEADER_SIZE
;
82 for (i
= 0; i
< ext_hdr
->count
; i
++) {
83 if (cpu_signatures_match(csig
, cpf
, ext_sig
->sig
, ext_sig
->pf
))
91 * Returns 1 if update has been found, 0 otherwise.
93 static int has_newer_microcode(void *mc
, unsigned int csig
, int cpf
, int new_rev
)
95 struct microcode_header_intel
*mc_hdr
= mc
;
97 if (mc_hdr
->rev
<= new_rev
)
100 return find_matching_signature(mc
, csig
, cpf
);
104 * Given CPU signature and a microcode patch, this function finds if the
105 * microcode patch has matching family and model with the CPU.
107 * %true - if there's a match
110 static bool microcode_matches(struct microcode_header_intel
*mc_header
,
113 unsigned long total_size
= get_totalsize(mc_header
);
114 unsigned long data_size
= get_datasize(mc_header
);
115 struct extended_sigtable
*ext_header
;
116 unsigned int fam_ucode
, model_ucode
;
117 struct extended_signature
*ext_sig
;
118 unsigned int fam
, model
;
121 fam
= x86_family(sig
);
122 model
= x86_model(sig
);
124 fam_ucode
= x86_family(mc_header
->sig
);
125 model_ucode
= x86_model(mc_header
->sig
);
127 if (fam
== fam_ucode
&& model
== model_ucode
)
130 /* Look for ext. headers: */
131 if (total_size
<= data_size
+ MC_HEADER_SIZE
)
134 ext_header
= (void *) mc_header
+ data_size
+ MC_HEADER_SIZE
;
135 ext_sig
= (void *)ext_header
+ EXT_HEADER_SIZE
;
136 ext_sigcount
= ext_header
->count
;
138 for (i
= 0; i
< ext_sigcount
; i
++) {
139 fam_ucode
= x86_family(ext_sig
->sig
);
140 model_ucode
= x86_model(ext_sig
->sig
);
142 if (fam
== fam_ucode
&& model
== model_ucode
)
150 static struct ucode_patch
*memdup_patch(void *data
, unsigned int size
)
152 struct ucode_patch
*p
;
154 p
= kzalloc(sizeof(struct ucode_patch
), GFP_KERNEL
);
158 p
->data
= kmemdup(data
, size
, GFP_KERNEL
);
167 static void save_microcode_patch(void *data
, unsigned int size
)
169 struct microcode_header_intel
*mc_hdr
, *mc_saved_hdr
;
170 struct ucode_patch
*iter
, *tmp
, *p
= NULL
;
171 bool prev_found
= false;
172 unsigned int sig
, pf
;
174 mc_hdr
= (struct microcode_header_intel
*)data
;
176 list_for_each_entry_safe(iter
, tmp
, µcode_cache
, plist
) {
177 mc_saved_hdr
= (struct microcode_header_intel
*)iter
->data
;
178 sig
= mc_saved_hdr
->sig
;
179 pf
= mc_saved_hdr
->pf
;
181 if (find_matching_signature(data
, sig
, pf
)) {
184 if (mc_hdr
->rev
<= mc_saved_hdr
->rev
)
187 p
= memdup_patch(data
, size
);
189 pr_err("Error allocating buffer %p\n", data
);
191 list_replace(&iter
->plist
, &p
->plist
);
196 * There weren't any previous patches found in the list cache; save the
200 p
= memdup_patch(data
, size
);
202 pr_err("Error allocating buffer for %p\n", data
);
204 list_add_tail(&p
->plist
, µcode_cache
);
211 * Save for early loading. On 32-bit, that needs to be a physical
212 * address as the APs are running from physical addresses, before
213 * paging has been enabled.
215 if (IS_ENABLED(CONFIG_X86_32
))
216 intel_ucode_patch
= (struct microcode_intel
*)__pa_nodebug(p
->data
);
218 intel_ucode_patch
= p
->data
;
221 static int microcode_sanity_check(void *mc
, int print_err
)
223 unsigned long total_size
, data_size
, ext_table_size
;
224 struct microcode_header_intel
*mc_header
= mc
;
225 struct extended_sigtable
*ext_header
= NULL
;
226 u32 sum
, orig_sum
, ext_sigcount
= 0, i
;
227 struct extended_signature
*ext_sig
;
229 total_size
= get_totalsize(mc_header
);
230 data_size
= get_datasize(mc_header
);
232 if (data_size
+ MC_HEADER_SIZE
> total_size
) {
234 pr_err("Error: bad microcode data file size.\n");
238 if (mc_header
->ldrver
!= 1 || mc_header
->hdrver
!= 1) {
240 pr_err("Error: invalid/unknown microcode update format.\n");
244 ext_table_size
= total_size
- (MC_HEADER_SIZE
+ data_size
);
245 if (ext_table_size
) {
246 u32 ext_table_sum
= 0;
249 if ((ext_table_size
< EXT_HEADER_SIZE
)
250 || ((ext_table_size
- EXT_HEADER_SIZE
) % EXT_SIGNATURE_SIZE
)) {
252 pr_err("Error: truncated extended signature table.\n");
256 ext_header
= mc
+ MC_HEADER_SIZE
+ data_size
;
257 if (ext_table_size
!= exttable_size(ext_header
)) {
259 pr_err("Error: extended signature table size mismatch.\n");
263 ext_sigcount
= ext_header
->count
;
266 * Check extended table checksum: the sum of all dwords that
267 * comprise a valid table must be 0.
269 ext_tablep
= (u32
*)ext_header
;
271 i
= ext_table_size
/ sizeof(u32
);
273 ext_table_sum
+= ext_tablep
[i
];
277 pr_warn("Bad extended signature table checksum, aborting.\n");
283 * Calculate the checksum of update data and header. The checksum of
284 * valid update data and header including the extended signature table
288 i
= (MC_HEADER_SIZE
+ data_size
) / sizeof(u32
);
290 orig_sum
+= ((u32
*)mc
)[i
];
294 pr_err("Bad microcode data checksum, aborting.\n");
302 * Check extended signature checksum: 0 => valid.
304 for (i
= 0; i
< ext_sigcount
; i
++) {
305 ext_sig
= (void *)ext_header
+ EXT_HEADER_SIZE
+
306 EXT_SIGNATURE_SIZE
* i
;
308 sum
= (mc_header
->sig
+ mc_header
->pf
+ mc_header
->cksum
) -
309 (ext_sig
->sig
+ ext_sig
->pf
+ ext_sig
->cksum
);
312 pr_err("Bad extended signature checksum, aborting.\n");
320 * Get microcode matching with BSP's model. Only CPUs with the same model as
321 * BSP can stay in the platform.
323 static struct microcode_intel
*
324 scan_microcode(void *data
, size_t size
, struct ucode_cpu_info
*uci
, bool save
)
326 struct microcode_header_intel
*mc_header
;
327 struct microcode_intel
*patch
= NULL
;
328 unsigned int mc_size
;
331 if (size
< sizeof(struct microcode_header_intel
))
334 mc_header
= (struct microcode_header_intel
*)data
;
336 mc_size
= get_totalsize(mc_header
);
339 microcode_sanity_check(data
, 0) < 0)
344 if (!microcode_matches(mc_header
, uci
->cpu_sig
.sig
)) {
350 save_microcode_patch(data
, mc_size
);
356 if (!has_newer_microcode(data
,
363 struct microcode_header_intel
*phdr
= &patch
->hdr
;
365 if (!has_newer_microcode(data
,
372 /* We have a newer patch, save it. */
385 static int collect_cpu_info_early(struct ucode_cpu_info
*uci
)
388 unsigned int family
, model
;
389 struct cpu_signature csig
= { 0 };
390 unsigned int eax
, ebx
, ecx
, edx
;
392 memset(uci
, 0, sizeof(*uci
));
396 native_cpuid(&eax
, &ebx
, &ecx
, &edx
);
399 family
= x86_family(eax
);
400 model
= x86_model(eax
);
402 if ((model
>= 5) || (family
> 6)) {
403 /* get processor flags from MSR 0x17 */
404 native_rdmsr(MSR_IA32_PLATFORM_ID
, val
[0], val
[1]);
405 csig
.pf
= 1 << ((val
[1] >> 18) & 7);
408 csig
.rev
= intel_get_microcode_revision();
/*
 * Debug dump of the microcode cache: for every saved patch, print its
 * signature, platform flags, revision, size and date, plus any extended
 * signatures. Compiled out unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig	= uci.cpu_sig.sig;
	pf	= uci.cpu_sig.pf;
	rev	= uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig	= mc_saved_header->sig;
		pf	= mc_saved_header->pf;
		rev	= mc_saved_header->rev;
		date	= mc_saved_header->date;

		total_size	= get_totalsize(mc_saved_header);
		data_size	= get_datasize(mc_saved_header);

		/* Date is packed BCD-style: yyyy in the low word, mm/dd above. */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
480 * Save this microcode patch. It will be loaded early when a CPU is
481 * hot-added or resumes.
483 static void save_mc_for_early(u8
*mc
, unsigned int size
)
485 #ifdef CONFIG_HOTPLUG_CPU
486 /* Synchronization during CPU hotplug. */
487 static DEFINE_MUTEX(x86_cpu_microcode_mutex
);
489 mutex_lock(&x86_cpu_microcode_mutex
);
491 save_microcode_patch(mc
, size
);
494 mutex_unlock(&x86_cpu_microcode_mutex
);
498 static bool load_builtin_intel_microcode(struct cpio_data
*cp
)
500 unsigned int eax
= 1, ebx
, ecx
= 0, edx
;
503 if (IS_ENABLED(CONFIG_X86_32
))
506 native_cpuid(&eax
, &ebx
, &ecx
, &edx
);
508 sprintf(name
, "intel-ucode/%02x-%02x-%02x",
509 x86_family(eax
), x86_model(eax
), x86_stepping(eax
));
511 return get_builtin_firmware(cp
, name
);
515 * Print ucode update info.
518 print_ucode_info(struct ucode_cpu_info
*uci
, unsigned int date
)
520 pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
524 (date
>> 16) & 0xff);
529 static int delay_ucode_info
;
530 static int current_mc_date
;
533 * Print early updated ucode info after printk works. This is delayed info dump.
535 void show_ucode_info_early(void)
537 struct ucode_cpu_info uci
;
539 if (delay_ucode_info
) {
540 collect_cpu_info_early(&uci
);
541 print_ucode_info(&uci
, current_mc_date
);
542 delay_ucode_info
= 0;
547 * At this point, we can not call printk() yet. Delay printing microcode info in
548 * show_ucode_info_early() until printk() works.
550 static void print_ucode(struct ucode_cpu_info
*uci
)
552 struct microcode_intel
*mc
;
553 int *delay_ucode_info_p
;
554 int *current_mc_date_p
;
560 delay_ucode_info_p
= (int *)__pa_nodebug(&delay_ucode_info
);
561 current_mc_date_p
= (int *)__pa_nodebug(¤t_mc_date
);
563 *delay_ucode_info_p
= 1;
564 *current_mc_date_p
= mc
->hdr
.date
;
568 static inline void print_ucode(struct ucode_cpu_info
*uci
)
570 struct microcode_intel
*mc
;
576 print_ucode_info(uci
, mc
->hdr
.date
);
580 static int apply_microcode_early(struct ucode_cpu_info
*uci
, bool early
)
582 struct microcode_intel
*mc
;
589 /* write microcode via MSR 0x79 */
590 native_wrmsrl(MSR_IA32_UCODE_WRITE
, (unsigned long)mc
->bits
);
592 rev
= intel_get_microcode_revision();
593 if (rev
!= mc
->hdr
.rev
)
596 uci
->cpu_sig
.rev
= rev
;
601 print_ucode_info(uci
, mc
->hdr
.date
);
606 int __init
save_microcode_in_initrd_intel(void)
608 struct ucode_cpu_info uci
;
612 * initrd is going away, clear patch ptr. We will scan the microcode one
613 * last time before jettisoning and save a patch, if found. Then we will
614 * update that pointer too, with a stable patch address to use when
615 * resuming the cores.
617 intel_ucode_patch
= NULL
;
619 if (!load_builtin_intel_microcode(&cp
))
620 cp
= find_microcode_in_initrd(ucode_path
, false);
622 if (!(cp
.data
&& cp
.size
))
625 collect_cpu_info_early(&uci
);
627 scan_microcode(cp
.data
, cp
.size
, &uci
, true);
635 * @res_patch, output: a pointer to the patch we found.
637 static struct microcode_intel
*__load_ucode_intel(struct ucode_cpu_info
*uci
)
639 static const char *path
;
643 if (IS_ENABLED(CONFIG_X86_32
)) {
644 path
= (const char *)__pa_nodebug(ucode_path
);
651 /* try built-in microcode first */
652 if (!load_builtin_intel_microcode(&cp
))
653 cp
= find_microcode_in_initrd(path
, use_pa
);
655 if (!(cp
.data
&& cp
.size
))
658 collect_cpu_info_early(uci
);
660 return scan_microcode(cp
.data
, cp
.size
, uci
, false);
663 void __init
load_ucode_intel_bsp(void)
665 struct microcode_intel
*patch
;
666 struct ucode_cpu_info uci
;
668 patch
= __load_ucode_intel(&uci
);
674 apply_microcode_early(&uci
, true);
677 void load_ucode_intel_ap(void)
679 struct microcode_intel
*patch
, **iup
;
680 struct ucode_cpu_info uci
;
682 if (IS_ENABLED(CONFIG_X86_32
))
683 iup
= (struct microcode_intel
**) __pa_nodebug(&intel_ucode_patch
);
685 iup
= &intel_ucode_patch
;
689 patch
= __load_ucode_intel(&uci
);
698 if (apply_microcode_early(&uci
, true)) {
699 /* Mixed-silicon system? Try to refetch the proper patch: */
706 static struct microcode_intel
*find_patch(struct ucode_cpu_info
*uci
)
708 struct microcode_header_intel
*phdr
;
709 struct ucode_patch
*iter
, *tmp
;
711 list_for_each_entry_safe(iter
, tmp
, µcode_cache
, plist
) {
713 phdr
= (struct microcode_header_intel
*)iter
->data
;
715 if (phdr
->rev
<= uci
->cpu_sig
.rev
)
718 if (!find_matching_signature(phdr
,
728 void reload_ucode_intel(void)
730 struct microcode_intel
*p
;
731 struct ucode_cpu_info uci
;
733 collect_cpu_info_early(&uci
);
735 p
= find_patch(&uci
);
741 apply_microcode_early(&uci
, false);
744 static int collect_cpu_info(int cpu_num
, struct cpu_signature
*csig
)
746 static struct cpu_signature prev
;
747 struct cpuinfo_x86
*c
= &cpu_data(cpu_num
);
750 memset(csig
, 0, sizeof(*csig
));
752 csig
->sig
= cpuid_eax(0x00000001);
754 if ((c
->x86_model
>= 5) || (c
->x86
> 6)) {
755 /* get processor flags from MSR 0x17 */
756 rdmsr(MSR_IA32_PLATFORM_ID
, val
[0], val
[1]);
757 csig
->pf
= 1 << ((val
[1] >> 18) & 7);
760 csig
->rev
= c
->microcode
;
762 /* No extra locking on prev, races are harmless. */
763 if (csig
->sig
!= prev
.sig
|| csig
->pf
!= prev
.pf
|| csig
->rev
!= prev
.rev
) {
764 pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
765 csig
->sig
, csig
->pf
, csig
->rev
);
772 static int apply_microcode_intel(int cpu
)
774 struct microcode_intel
*mc
;
775 struct ucode_cpu_info
*uci
;
776 struct cpuinfo_x86
*c
;
780 /* We should bind the task to the CPU */
781 if (WARN_ON(raw_smp_processor_id() != cpu
))
784 uci
= ucode_cpu_info
+ cpu
;
787 /* Look for a newer patch in our cache: */
788 mc
= find_patch(uci
);
793 /* write microcode via MSR 0x79 */
794 wrmsrl(MSR_IA32_UCODE_WRITE
, (unsigned long)mc
->bits
);
796 rev
= intel_get_microcode_revision();
798 if (rev
!= mc
->hdr
.rev
) {
799 pr_err("CPU%d update to revision 0x%x failed\n",
804 if (rev
!= prev_rev
) {
805 pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
807 mc
->hdr
.date
& 0xffff,
809 (mc
->hdr
.date
>> 16) & 0xff);
815 uci
->cpu_sig
.rev
= rev
;
821 static enum ucode_state
generic_load_microcode(int cpu
, void *data
, size_t size
,
822 int (*get_ucode_data
)(void *, const void *, size_t))
824 struct ucode_cpu_info
*uci
= ucode_cpu_info
+ cpu
;
825 u8
*ucode_ptr
= data
, *new_mc
= NULL
, *mc
= NULL
;
826 int new_rev
= uci
->cpu_sig
.rev
;
827 unsigned int leftover
= size
;
828 unsigned int curr_mc_size
= 0, new_mc_size
= 0;
829 unsigned int csig
, cpf
;
832 struct microcode_header_intel mc_header
;
833 unsigned int mc_size
;
835 if (leftover
< sizeof(mc_header
)) {
836 pr_err("error! Truncated header in microcode data file\n");
840 if (get_ucode_data(&mc_header
, ucode_ptr
, sizeof(mc_header
)))
843 mc_size
= get_totalsize(&mc_header
);
844 if (!mc_size
|| mc_size
> leftover
) {
845 pr_err("error! Bad data in microcode data file\n");
849 /* For performance reasons, reuse mc area when possible */
850 if (!mc
|| mc_size
> curr_mc_size
) {
852 mc
= vmalloc(mc_size
);
855 curr_mc_size
= mc_size
;
858 if (get_ucode_data(mc
, ucode_ptr
, mc_size
) ||
859 microcode_sanity_check(mc
, 1) < 0) {
863 csig
= uci
->cpu_sig
.sig
;
864 cpf
= uci
->cpu_sig
.pf
;
865 if (has_newer_microcode(mc
, csig
, cpf
, new_rev
)) {
867 new_rev
= mc_header
.rev
;
869 new_mc_size
= mc_size
;
870 mc
= NULL
; /* trigger new vmalloc */
873 ucode_ptr
+= mc_size
;
888 uci
->mc
= (struct microcode_intel
*)new_mc
;
891 * If early loading microcode is supported, save this mc into
892 * permanent memory. So it will be loaded early when a CPU is hot added
895 save_mc_for_early(new_mc
, new_mc_size
);
897 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
898 cpu
, new_rev
, uci
->cpu_sig
.rev
);
/* Copy helper for firmware-backed loads: plain memcpy, never fails. */
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}
909 static bool is_blacklisted(unsigned int cpu
)
911 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
913 if (c
->x86
== 6 && c
->x86_model
== INTEL_FAM6_BROADWELL_X
) {
914 pr_err_once("late loading on model 79 is disabled.\n");
921 static enum ucode_state
request_microcode_fw(int cpu
, struct device
*device
,
925 struct cpuinfo_x86
*c
= &cpu_data(cpu
);
926 const struct firmware
*firmware
;
927 enum ucode_state ret
;
929 if (is_blacklisted(cpu
))
932 sprintf(name
, "intel-ucode/%02x-%02x-%02x",
933 c
->x86
, c
->x86_model
, c
->x86_mask
);
935 if (request_firmware_direct(&firmware
, name
, device
)) {
936 pr_debug("data file %s load failed\n", name
);
940 ret
= generic_load_microcode(cpu
, (void *)firmware
->data
,
941 firmware
->size
, &get_ucode_fw
);
943 release_firmware(firmware
);
948 static int get_ucode_user(void *to
, const void *from
, size_t n
)
950 return copy_from_user(to
, from
, n
);
953 static enum ucode_state
954 request_microcode_user(int cpu
, const void __user
*buf
, size_t size
)
956 if (is_blacklisted(cpu
))
959 return generic_load_microcode(cpu
, (void *)buf
, size
, &get_ucode_user
);
962 static struct microcode_ops microcode_intel_ops
= {
963 .request_microcode_user
= request_microcode_user
,
964 .request_microcode_fw
= request_microcode_fw
,
965 .collect_cpu_info
= collect_cpu_info
,
966 .apply_microcode
= apply_microcode_intel
,
969 struct microcode_ops
* __init
init_intel_microcode(void)
971 struct cpuinfo_x86
*c
= &boot_cpu_data
;
973 if (c
->x86_vendor
!= X86_VENDOR_INTEL
|| c
->x86
< 6 ||
974 cpu_has(c
, X86_FEATURE_IA64
)) {
975 pr_err("Intel CPU family 0x%x not supported\n", c
->x86
);
979 return µcode_intel_ops
;