]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 | 2 | /* |
6b44e72a | 3 | * Intel CPU Microcode Update Driver for Linux |
1da177e4 | 4 | * |
cea58224 | 5 | * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com> |
6b44e72a | 6 | * 2006 Shaohua Li <shaohua.li@intel.com> |
1da177e4 | 7 | * |
fe055896 BP |
8 | * Intel CPU microcode early update for Linux |
9 | * | |
10 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | |
11 | * H Peter Anvin" <hpa@zytor.com> | |
1da177e4 | 12 | */ |
f58e1f53 | 13 | |
fe055896 BP |
14 | /* |
15 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | |
16 | * printk calls into no_printk(). | |
17 | * | |
18 | *#define DEBUG | |
19 | */ | |
6b26e1bf | 20 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 21 | |
fe055896 | 22 | #include <linux/earlycpio.h> |
4bae1967 | 23 | #include <linux/firmware.h> |
4bae1967 | 24 | #include <linux/uaccess.h> |
fe055896 BP |
25 | #include <linux/vmalloc.h> |
26 | #include <linux/initrd.h> | |
4bae1967 | 27 | #include <linux/kernel.h> |
fe055896 BP |
28 | #include <linux/slab.h> |
29 | #include <linux/cpu.h> | |
7e94a7b6 | 30 | #include <linux/uio.h> |
fe055896 | 31 | #include <linux/mm.h> |
1da177e4 | 32 | |
9cd4d78e | 33 | #include <asm/microcode_intel.h> |
723f2828 | 34 | #include <asm/intel-family.h> |
4bae1967 | 35 | #include <asm/processor.h> |
fe055896 BP |
36 | #include <asm/tlbflush.h> |
37 | #include <asm/setup.h> | |
4bae1967 | 38 | #include <asm/msr.h> |
1da177e4 | 39 | |
/* Path of the microcode blob inside the initrd/builtin cpio archive. */
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core; computed in init_intel_microcode(),
 * consumed by is_blacklisted() for the BDF90 erratum check. */
static int llc_size_per_core;
/*
 * Check whether CPU (signature @s1, processor flags @p1) is matched by a
 * microcode patch carrying signature @s2 and flags @p2.  The signatures
 * must be equal; the flags match when both are zero or when they share
 * at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Either both flag words are empty, or they intersect. */
	return (!p1 && !p2) || (p1 & p2);
}
61 | ||
/*
 * Check whether microcode blob @mc is applicable to a CPU with signature
 * @csig and processor flags @cpf.  The primary header is checked first,
 * then every entry of the optional extended signature table.
 *
 * Returns 1 if update has been found, 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	/* Fast path: the main header signature matches. */
	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: present only when totalsize exceeds
	 * header + data. */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}
89 | ||
90 | /* | |
91 | * Returns 1 if update has been found, 0 otherwise. | |
92 | */ | |
93 | static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) | |
94 | { | |
95 | struct microcode_header_intel *mc_hdr = mc; | |
96 | ||
97 | if (mc_hdr->rev <= new_rev) | |
98 | return 0; | |
99 | ||
100 | return find_matching_signature(mc, csig, cpf); | |
101 | } | |
102 | ||
aa78c1cc | 103 | static struct ucode_patch *memdup_patch(void *data, unsigned int size) |
fe055896 | 104 | { |
06b8534c | 105 | struct ucode_patch *p; |
fe055896 | 106 | |
9fcf5ba2 | 107 | p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); |
06b8534c | 108 | if (!p) |
aa78c1cc | 109 | return NULL; |
fe055896 | 110 | |
06b8534c BP |
111 | p->data = kmemdup(data, size, GFP_KERNEL); |
112 | if (!p->data) { | |
113 | kfree(p); | |
aa78c1cc | 114 | return NULL; |
fe055896 BP |
115 | } |
116 | ||
06b8534c | 117 | return p; |
fe055896 BP |
118 | } |
119 | ||
/*
 * Insert the patch @data (@size bytes) into the patch cache list: replace
 * an existing cached patch of the same signature when the new one has a
 * higher revision, or append it when no patch with a matching signature
 * is cached yet.  Finally, if the (possibly) new cache entry matches the
 * current CPU in @uci, publish it through intel_ucode_patch for the APs.
 */
static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	/* _safe variant: list_replace() below swaps out the cursor entry. */
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig = mc_saved_hdr->sig;
		pf = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			/* Cached patch is at least as new - keep it. */
			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				/* Swap in the newer copy and free the old. */
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	/* Nothing was (re)inserted - leave intel_ucode_patch alone. */
	if (!p)
		return;

	/* Only publish a patch applicable to the CPU doing the saving. */
	if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}
179 | ||
/*
 * Validate a microcode image: size fields, header/loader version, the
 * optional extended signature table layout, and all checksums (whole
 * image, extended table, and each extended signature entry).
 *
 * @mc:		candidate microcode image
 * @print_err:	non-zero to log the reason for rejection (0 while silently
 *		probing candidate blobs, see scan_microcode())
 *
 * Returns 0 for a well-formed image, -EINVAL or -EFAULT otherwise.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	/* The data area plus header must fit inside the claimed total. */
	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	/* Only header/loader version 1 is understood. */
	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	/* Whatever trails the data area is the extended signature table. */
	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		/* Table must hold its header plus whole signature entries. */
		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		/* Entry count in the table header must match the size. */
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.  Each extended
	 * entry's (sig + pf + cksum) must equal the main header's triple, so
	 * swapping the signature in keeps the overall image checksum at 0.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
277 | ||
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated microcode blobs in @data (@size bytes).  When
 * @save is true, every patch matching the CPU in @uci is stashed in the
 * patch cache via save_microcode_patch(); otherwise the newest matching
 * patch found is returned directly (pointing into @data).
 *
 * Returns NULL if the walk terminated early (malformed data) or, in save
 * mode, always NULL-equivalent via the !patch path.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		/* Silent sanity check (print_err=0) while probing blobs. */
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!find_matching_signature(data, uci->cpu_sig.sig,
					     uci->cpu_sig.pf)) {
			data += mc_size;
			continue;
		}

		if (save) {
			save_microcode_patch(uci, data, mc_size);
			goto next;
		}

		if (!patch) {
			/* First candidate: must beat the CPU's revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Later candidates: must beat the best one so far. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean the image was malformed - trust nothing. */
	if (size)
		return NULL;

	return patch;
}
344 | ||
/*
 * Gather the CPU identification needed for microcode matching using only
 * native CPUID/RDMSR, so it is safe during early boot: signature from
 * CPUID(1).EAX, platform-id flags from MSR_IA32_PLATFORM_ID bits 52:50,
 * and the currently loaded microcode revision.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model = x86_model(eax);

	/* Platform flags exist only on family 6 model 5+ and family > 6. */
	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
375 | ||
/*
 * Debug helper: dump the current CPU signature and every cached patch
 * (signature, flags, revision, size, date, extended signatures).
 * Compiles to an empty function unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
fe055896 | 438 | |
/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.  Serializes concurrent cache updates with a local
 * mutex since this runs from the (sleepable) late-loading path.
 */
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(uci, mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}
fe055896 | 455 | |
/*
 * Look up built-in firmware named after the boot CPU's
 * family-model-stepping, "intel-ucode/FF-MM-SS".  Not wired up on
 * 32-bit, where this always fails so the initrd path is taken instead.
 */
static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
	unsigned int eax = 1, ebx, ecx = 0, edx;
	char name[30];

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		x86_family(eax), x86_model(eax), x86_stepping(eax));

	return get_builtin_firmware(cp, name);
}
471 | ||
/*
 * Print ucode update info.  @date appears to be packed as
 * (month << 24) | (day << 16) | year, matching the %04x-%02x-%02x output
 * used throughout this file.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}
484 | ||
#ifdef CONFIG_X86_32

/* Set from the early-update path; consumed once printk is available. */
static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	/*
	 * 32-bit early loading runs before paging is enabled (see the note
	 * in save_microcode_patch()), so the flag variables must be written
	 * through their physical addresses.
	 */
	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

/* 64-bit early loading can printk directly - no delayed dump needed. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif
537 | ||
/*
 * Load the patch in @uci->mc into the CPU via MSR_IA32_UCODE_WRITE and
 * verify the new revision took effect.
 *
 * NOTE(review): the return values mix conventions - 0 on success or
 * no-patch, UCODE_OK (non-zero) when already up to date, -1 on failure;
 * callers only test for non-zero/zero, so confirm before relying on the
 * exact value.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	/* The CPU reports the newly loaded revision; verify it stuck. */
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	/* Early (pre-printk) callers get the delayed/once print path. */
	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
580 | ||
/*
 * Called when the initrd is about to be jettisoned: rescan it (or the
 * builtin firmware) one final time and copy any applicable patch into the
 * kernel-owned cache so it survives for AP bringup and resume.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: cache patches (and republish intel_ucode_patch). */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
6c545647 | 608 | |
/*
 * Locate the microcode blob (builtin firmware first, then initrd), fill
 * in @uci with the current CPU's signature and return the newest matching
 * patch inside the blob, or NULL.  On 32-bit everything is accessed via
 * physical addresses since this can run before paging is enabled.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	/* save=false: just return a pointer into the blob. */
	return scan_microcode(cp.data, cp.size, uci, false);
}
637 | ||
/* Early microcode loading entry point for the boot CPU. */
void __init load_ucode_intel_bsp(void)
{
	struct microcode_intel *patch;
	struct ucode_cpu_info uci;

	patch = __load_ucode_intel(&uci);
	if (!patch)
		return;

	uci.mc = patch;

	apply_microcode_early(&uci, true);
}
651 | ||
/*
 * Early microcode loading entry point for APs.  Reuses the patch pointer
 * cached by the BSP (dereferenced through its physical address on 32-bit,
 * as paging may not be up yet) and falls back to rescanning the blob when
 * no pointer is cached or the cached patch fails to apply.
 */
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}
680 | ||
06b8534c | 681 | static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) |
fe055896 | 682 | { |
06b8534c BP |
683 | struct microcode_header_intel *phdr; |
684 | struct ucode_patch *iter, *tmp; | |
fe055896 | 685 | |
06b8534c | 686 | list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { |
fe055896 | 687 | |
06b8534c | 688 | phdr = (struct microcode_header_intel *)iter->data; |
efaad554 | 689 | |
06b8534c BP |
690 | if (phdr->rev <= uci->cpu_sig.rev) |
691 | continue; | |
efaad554 | 692 | |
06b8534c BP |
693 | if (!find_matching_signature(phdr, |
694 | uci->cpu_sig.sig, | |
695 | uci->cpu_sig.pf)) | |
696 | continue; | |
fe055896 | 697 | |
06b8534c BP |
698 | return iter->data; |
699 | } | |
700 | return NULL; | |
fe055896 BP |
701 | } |
702 | ||
/*
 * Re-apply a cached microcode patch on the current CPU, e.g. after
 * resume.  Quietly does nothing when no newer cached patch matches.
 */
void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	/* early=false: printk is available, print directly. */
	apply_microcode_early(&uci, false);
}
718 | ||
/*
 * microcode_ops callback: fill @csig with the signature, platform flags
 * and current microcode revision of CPU @cpu_num.  Logs the triple only
 * when it changed since the last call, to avoid per-CPU log spam.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	/* Platform flags exist only on family 6 model 5+ and family > 6. */
	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}
746 | ||
/*
 * microcode_ops callback for late loading: apply the best available patch
 * (cache first, then the per-CPU uci->mc) on the calling CPU via
 * MSR_IA32_UCODE_WRITE and propagate the resulting revision into
 * cpu_data/boot_cpu_data.  Must run on the target CPU.
 */
static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	/* The revision reported by the CPU must match the patch header. */
	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	/* Log once per distinct revision, not once per CPU. */
	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
817 | ||
/*
 * Parse a user/firmware-supplied stream of concatenated microcode images
 * from @iter, sanity-check each one and keep the newest patch matching
 * this CPU.  The winner replaces uci->mc (which owns it, vmalloc'd) and
 * is additionally copied into the early-loading cache.
 *
 * Returns UCODE_NEW when a newer patch was found, UCODE_OK when the
 * stream held nothing newer, UCODE_NFOUND/UCODE_ERROR otherwise.
 */
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		/* The rest of the image must still be present in the stream. */
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		/* Reassemble header + payload, then verify checksums. */
		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* New best candidate - take ownership of mc. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	/* Loop broke out early: the stream was malformed, discard all. */
	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	/* Hand the winner to the per-CPU slot, freeing any previous one. */
	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(uci, new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}
901 | ||
/*
 * Refuse late loading on configurations where it is known to be unsafe.
 * Currently covers only the Broadwell-X BDF90 erratum below.
 */
static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&	/* 2.5 MB */
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}
924 | ||
/*
 * microcode_ops callback: fetch "intel-ucode/FF-MM-SS" for @cpu via the
 * firmware loader and feed it through generic_load_microcode() as a
 * single-kvec iov_iter.
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	struct iov_iter iter;
	enum ucode_state ret;
	struct kvec kvec;
	char name[30];

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	/* _direct: no usermode fallback helper for microcode. */
	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	kvec.iov_base = (void *)firmware->data;
	kvec.iov_len = firmware->size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
	ret = generic_load_microcode(cpu, &iter);

	release_firmware(firmware);

	return ret;
}
955 | ||
/*
 * microcode_ops callback: load microcode from a userspace buffer
 * (/dev/cpu/microcode write path), wrapped in a single-iovec iov_iter.
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	struct iov_iter iter;
	struct iovec iov;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	iov.iov_base = (void __user *)buf;
	iov.iov_len = size;
	iov_iter_init(&iter, WRITE, &iov, 1, size);

	return generic_load_microcode(cpu, &iter);
}
971 | ||
/* Intel implementation of the generic microcode-loader callbacks. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
};
978 | ||
/*
 * Last-level cache size divided by the number of cores in the package;
 * x86_cache_size is in KB, the result is in bytes.  Feeds the BDF90
 * erratum check in is_blacklisted().
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}
987 | ||
/*
 * Entry point from the arch microcode core: verify this is a supported
 * Intel CPU (family >= 6, not Itanium-capable), precompute the per-core
 * LLC size and hand back the Intel ops table (NULL if unsupported).
 */
struct microcode_ops * __init init_intel_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
	    cpu_has(c, X86_FEATURE_IA64)) {
		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	llc_size_per_core = calc_llc_size_per_core(c);

	return &microcode_intel_ops;
}