]>
Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 | 2 | /* |
6b44e72a | 3 | * Intel CPU Microcode Update Driver for Linux |
1da177e4 | 4 | * |
cea58224 | 5 | * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com> |
6b44e72a | 6 | * 2006 Shaohua Li <shaohua.li@intel.com> |
1da177e4 | 7 | * |
fe055896 BP |
8 | * Intel CPU microcode early update for Linux |
9 | * | |
10 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | |
11 | * H Peter Anvin <hpa@zytor.com> | |
1da177e4 | 12 | */ |
f58e1f53 | 13 | |
fe055896 BP |
14 | /* |
15 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | |
16 | * printk calls into no_printk(). | |
17 | * | |
18 | *#define DEBUG | |
19 | */ | |
6b26e1bf | 20 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 21 | |
fe055896 | 22 | #include <linux/earlycpio.h> |
4bae1967 | 23 | #include <linux/firmware.h> |
4bae1967 | 24 | #include <linux/uaccess.h> |
fe055896 BP |
25 | #include <linux/vmalloc.h> |
26 | #include <linux/initrd.h> | |
4bae1967 | 27 | #include <linux/kernel.h> |
fe055896 BP |
28 | #include <linux/slab.h> |
29 | #include <linux/cpu.h> | |
7e94a7b6 | 30 | #include <linux/uio.h> |
fe055896 | 31 | #include <linux/mm.h> |
1da177e4 | 32 | |
9cd4d78e | 33 | #include <asm/microcode_intel.h> |
723f2828 | 34 | #include <asm/intel-family.h> |
4bae1967 | 35 | #include <asm/processor.h> |
fe055896 BP |
36 | #include <asm/tlbflush.h> |
37 | #include <asm/setup.h> | |
4bae1967 | 38 | #include <asm/msr.h> |
1da177e4 | 39 | |
06b8534c | 40 | static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; |
fe055896 | 41 | |
c26665ab | 42 | /* Current microcode patch used in early patching on the APs. */ |
d7f7dc7b | 43 | static struct microcode_intel *intel_ucode_patch; |
6c545647 | 44 | |
7e702d17 JZ |
45 | /* last level cache size per core */ |
46 | static int llc_size_per_core; | |
47 | ||
8027923a BP |
48 | static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, |
49 | unsigned int s2, unsigned int p2) | |
50 | { | |
51 | if (s1 != s2) | |
52 | return false; | |
53 | ||
54 | /* Processor flags are either both 0 ... */ | |
55 | if (!p1 && !p2) | |
56 | return true; | |
57 | ||
58 | /* ... or they intersect. */ | |
59 | return p1 & p2; | |
60 | } | |
61 | ||
62 | /* | |
63 | * Returns 1 if update has been found, 0 otherwise. | |
64 | */ | |
65 | static int find_matching_signature(void *mc, unsigned int csig, int cpf) | |
66 | { | |
67 | struct microcode_header_intel *mc_hdr = mc; | |
68 | struct extended_sigtable *ext_hdr; | |
69 | struct extended_signature *ext_sig; | |
70 | int i; | |
71 | ||
72 | if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) | |
73 | return 1; | |
74 | ||
75 | /* Look for ext. headers: */ | |
76 | if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE) | |
77 | return 0; | |
78 | ||
79 | ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE; | |
80 | ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; | |
81 | ||
82 | for (i = 0; i < ext_hdr->count; i++) { | |
83 | if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) | |
84 | return 1; | |
85 | ext_sig++; | |
86 | } | |
87 | return 0; | |
88 | } | |
89 | ||
90 | /* | |
91 | * Returns 1 if update has been found, 0 otherwise. | |
92 | */ | |
93 | static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) | |
94 | { | |
95 | struct microcode_header_intel *mc_hdr = mc; | |
96 | ||
97 | if (mc_hdr->rev <= new_rev) | |
98 | return 0; | |
99 | ||
100 | return find_matching_signature(mc, csig, cpf); | |
101 | } | |
102 | ||
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * Note: only family and model are compared here - stepping and platform
 * flags are deliberately ignored at this (scan) stage.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	/* Family/model of the running CPU ... */
	fam = x86_family(sig);
	model = x86_model(sig);

	/* ... vs. family/model encoded in the patch header. */
	fam_ucode = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	/* Retry the comparison against each extended signature entry. */
	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}
149 | ||
aa78c1cc | 150 | static struct ucode_patch *memdup_patch(void *data, unsigned int size) |
fe055896 | 151 | { |
06b8534c | 152 | struct ucode_patch *p; |
fe055896 | 153 | |
9fcf5ba2 | 154 | p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); |
06b8534c | 155 | if (!p) |
aa78c1cc | 156 | return NULL; |
fe055896 | 157 | |
06b8534c BP |
158 | p->data = kmemdup(data, size, GFP_KERNEL); |
159 | if (!p->data) { | |
160 | kfree(p); | |
aa78c1cc | 161 | return NULL; |
fe055896 BP |
162 | } |
163 | ||
06b8534c | 164 | return p; |
fe055896 BP |
165 | } |
166 | ||
/*
 * Insert patch @data (@size bytes) into the microcode cache, replacing any
 * older cached patch with a matching signature, and point intel_ucode_patch
 * at the saved copy for early AP loading.
 */
static void save_microcode_patch(void *data, unsigned int size)
{
	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
	struct ucode_patch *iter, *tmp, *p = NULL;
	bool prev_found = false;
	unsigned int sig, pf;

	mc_hdr = (struct microcode_header_intel *)data;

	/* _safe variant: list_replace() swaps out the current node. */
	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
		sig = mc_saved_hdr->sig;
		pf = mc_saved_hdr->pf;

		if (find_matching_signature(data, sig, pf)) {
			prev_found = true;

			/* Cached patch is already at least as new - keep it. */
			if (mc_hdr->rev <= mc_saved_hdr->rev)
				continue;

			p = memdup_patch(data, size);
			if (!p)
				pr_err("Error allocating buffer %p\n", data);
			else {
				/* Swap the new copy in and free the old one. */
				list_replace(&iter->plist, &p->plist);
				kfree(iter->data);
				kfree(iter);
			}
		}
	}

	/*
	 * There weren't any previous patches found in the list cache; save the
	 * newly found.
	 */
	if (!prev_found) {
		p = memdup_patch(data, size);
		if (!p)
			pr_err("Error allocating buffer for %p\n", data);
		else
			list_add_tail(&p->plist, &microcode_cache);
	}

	/* Nothing was saved (allocation failed or patch was older). */
	if (!p)
		return;

	/*
	 * Save for early loading. On 32-bit, that needs to be a physical
	 * address as the APs are running from physical addresses, before
	 * paging has been enabled.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
	else
		intel_ucode_patch = p->data;
}
223 | ||
/*
 * Validate the structural integrity of microcode blob @mc: header sizes,
 * loader/header version, and the dword checksums of both the main image and
 * any extended signature table. @print_err controls error logging so the
 * early scan path can stay silent.
 *
 * Returns 0 if the blob is well-formed, negative errno otherwise.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	/* Data area plus header must fit inside the declared total size. */
	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	/* Only loader revision 1 / header version 1 are understood. */
	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		/* Table must hold its header plus whole signature entries. */
		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		/*
		 * Each extended entry's cksum is chosen so the blob still sums
		 * to zero when its sig/pf/cksum replace the header's fields.
		 */
		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
321 | ||
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated blobs in @data (@size bytes). With @save set, every
 * matching blob is stashed in the patch cache; otherwise the newest
 * applicable blob is returned. Returns NULL on a malformed container or when
 * nothing applies.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		/* Bail out of the whole scan on any malformed blob. */
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		/* Wrong family/model: skip to the next blob. */
		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		/* Caching pass: stash it and keep scanning. */
		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}


		if (!patch) {
			/* First candidate: must beat the running revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Later candidates: must beat the best one so far. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean the container was truncated/corrupt. */
	if (size)
		return NULL;

	return patch;
}
387 | ||
/*
 * Fill @uci with this CPU's signature, platform flags and current microcode
 * revision using raw CPUID/MSR accesses (safe before the cpuinfo machinery
 * is up). Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	/* CPUID leaf 1: processor signature in EAX. */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
418 | ||
/*
 * Debug helper: dump the CPU signature and every patch currently held in the
 * microcode cache, including extended signature entries. Compiles to a no-op
 * unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		/* date is BCD-packed: year in the low word, month/day above. */
		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
fe055896 | 481 | |
/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.
 */
static void save_mc_for_early(u8 *mc, unsigned int size)
{
	/* Synchronization during CPU hotplug. */
	static DEFINE_MUTEX(x86_cpu_microcode_mutex);

	/* Serialize cache updates against concurrent hotplug callers. */
	mutex_lock(&x86_cpu_microcode_mutex);

	save_microcode_patch(mc, size);
	show_saved_mc();

	mutex_unlock(&x86_cpu_microcode_mutex);
}
fe055896 | 498 | |
06b8534c | 499 | static bool load_builtin_intel_microcode(struct cpio_data *cp) |
fe055896 | 500 | { |
06b8534c | 501 | unsigned int eax = 1, ebx, ecx = 0, edx; |
fe055896 BP |
502 | char name[30]; |
503 | ||
06b8534c BP |
504 | if (IS_ENABLED(CONFIG_X86_32)) |
505 | return false; | |
506 | ||
fe055896 BP |
507 | native_cpuid(&eax, &ebx, &ecx, &edx); |
508 | ||
99f925ce BP |
509 | sprintf(name, "intel-ucode/%02x-%02x-%02x", |
510 | x86_family(eax), x86_model(eax), x86_stepping(eax)); | |
fe055896 BP |
511 | |
512 | return get_builtin_firmware(cp, name); | |
fe055896 BP |
513 | } |
514 | ||
/*
 * Print ucode update info.
 *
 * @date is BCD-packed as mmddyyyy: year in bits 15:0, month in 31:24,
 * day in 23:16. Printed only once per boot (pr_info_once).
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}
527 | ||
#ifdef CONFIG_X86_32

/* Set when the early-update message must be deferred until printk works. */
static int delay_ucode_info;
/* Date of the applied patch, stashed for the deferred message. */
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	/*
	 * Running before paging is enabled, so the static variables must be
	 * written through their physical addresses.
	 */
	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

/* 64-bit runs with paging up early, so the info can be printed directly. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif
580 | ||
/*
 * Apply the patch in @uci->mc to the current CPU via MSR 0x79 and verify the
 * new revision took effect. @early selects the deferred-print path.
 *
 * Returns 0 on success (or no patch to apply), -1 if the revision did not
 * change after the write. NOTE(review): the "already up to date" branch
 * returns UCODE_OK, mixing an enum into the 0/-1 convention - callers treat
 * nonzero as failure, so confirm UCODE_OK == 0 before relying on this.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return UCODE_OK;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	/* Re-read the revision to confirm the update actually stuck. */
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
623 | ||
/*
 * Called before the initrd is jettisoned: rescan built-in/initrd microcode
 * and copy any applicable patch into kernel-owned memory so APs can still be
 * updated on hotplug/resume afterwards. Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: matching blobs go into the patch cache. */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
6c545647 | 651 | |
/*
 * Common early-load path: locate a microcode container (built-in firmware
 * first, then the initrd), collect this CPU's signature into @uci and return
 * the newest applicable patch, or NULL if none is found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* Pre-paging on 32-bit: the path must be a physical address. */
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	/* save=false: just return the best patch, don't cache it. */
	return scan_microcode(cp.data, cp.size, uci, false);
}
680 | ||
06b8534c | 681 | void __init load_ucode_intel_bsp(void) |
fe055896 | 682 | { |
06b8534c | 683 | struct microcode_intel *patch; |
fe055896 | 684 | struct ucode_cpu_info uci; |
fe055896 | 685 | |
06b8534c BP |
686 | patch = __load_ucode_intel(&uci); |
687 | if (!patch) | |
fe055896 BP |
688 | return; |
689 | ||
06b8534c | 690 | uci.mc = patch; |
fe055896 BP |
691 | |
692 | apply_microcode_early(&uci, true); | |
693 | } | |
694 | ||
/*
 * Early microcode load on an AP: reuse the patch pointer cached by the BSP,
 * re-scanning the sources if it is empty or fails to apply (mixed silicon).
 */
void load_ucode_intel_ap(void)
{
	struct microcode_intel *patch, **iup;
	struct ucode_cpu_info uci;

	/* On 32-bit, access the cached pointer via its physical address. */
	if (IS_ENABLED(CONFIG_X86_32))
		iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
	else
		iup = &intel_ucode_patch;

reget:
	if (!*iup) {
		patch = __load_ucode_intel(&uci);
		if (!patch)
			return;

		*iup = patch;
	}

	uci.mc = *iup;

	if (apply_microcode_early(&uci, true)) {
		/* Mixed-silicon system? Try to refetch the proper patch: */
		*iup = NULL;

		goto reget;
	}
}
723 | ||
06b8534c | 724 | static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) |
fe055896 | 725 | { |
06b8534c BP |
726 | struct microcode_header_intel *phdr; |
727 | struct ucode_patch *iter, *tmp; | |
fe055896 | 728 | |
06b8534c | 729 | list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { |
fe055896 | 730 | |
06b8534c | 731 | phdr = (struct microcode_header_intel *)iter->data; |
efaad554 | 732 | |
06b8534c BP |
733 | if (phdr->rev <= uci->cpu_sig.rev) |
734 | continue; | |
efaad554 | 735 | |
06b8534c BP |
736 | if (!find_matching_signature(phdr, |
737 | uci->cpu_sig.sig, | |
738 | uci->cpu_sig.pf)) | |
739 | continue; | |
fe055896 | 740 | |
06b8534c BP |
741 | return iter->data; |
742 | } | |
743 | return NULL; | |
fe055896 BP |
744 | } |
745 | ||
746 | void reload_ucode_intel(void) | |
747 | { | |
06b8534c | 748 | struct microcode_intel *p; |
fe055896 | 749 | struct ucode_cpu_info uci; |
fe055896 BP |
750 | |
751 | collect_cpu_info_early(&uci); | |
752 | ||
06b8534c BP |
753 | p = find_patch(&uci); |
754 | if (!p) | |
fe055896 BP |
755 | return; |
756 | ||
06b8534c BP |
757 | uci.mc = p; |
758 | ||
fe055896 BP |
759 | apply_microcode_early(&uci, false); |
760 | } | |
761 | ||
/*
 * Driver callback: fill @csig with CPU @cpu_num's signature, platform flags
 * and microcode revision (from cpu_data, not raw MSRs). Logs the values once
 * per distinct signature. Always returns 0.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
	static struct cpu_signature prev;
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
	unsigned int val[2];

	memset(csig, 0, sizeof(*csig));

	csig->sig = cpuid_eax(0x00000001);

	if ((c->x86_model >= 5) || (c->x86 > 6)) {
		/* get processor flags from MSR 0x17 */
		rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig->pf = 1 << ((val[1] >> 18) & 7);
	}

	csig->rev = c->microcode;

	/* No extra locking on prev, races are harmless. */
	if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
		pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
			csig->sig, csig->pf, csig->rev);
		prev = *csig;
	}

	return 0;
}
789 | ||
/*
 * Late-loading path: apply the best available patch (cache first, then the
 * per-CPU uci->mc) to @cpu, which must be the CPU the caller is running on.
 *
 * Returns UCODE_UPDATED on a successful update, UCODE_OK if the CPU already
 * runs that revision, UCODE_NFOUND/UCODE_ERROR on failure.
 */
static enum ucode_state apply_microcode_intel(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_intel *mc;
	enum ucode_state ret;
	static int prev_rev;
	u32 rev;

	/* We should bind the task to the CPU */
	if (WARN_ON(raw_smp_processor_id() != cpu))
		return UCODE_ERROR;

	/* Look for a newer patch in our cache: */
	mc = find_patch(uci);
	if (!mc) {
		mc = uci->mc;
		if (!mc)
			return UCODE_NFOUND;
	}

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc->hdr.rev) {
		ret = UCODE_OK;
		goto out;
	}

	/*
	 * Writeback and invalidate caches before updating microcode to avoid
	 * internal issues depending on what the microcode is updating.
	 */
	native_wbinvd();

	/* write microcode via MSR 0x79 */
	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();

	if (rev != mc->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu, mc->hdr.rev);
		return UCODE_ERROR;
	}

	/* Print once per new revision, not once per CPU. */
	if (rev != prev_rev) {
		pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
			rev,
			mc->hdr.date & 0xffff,
			mc->hdr.date >> 24,
			(mc->hdr.date >> 16) & 0xff);
		prev_rev = rev;
	}

	ret = UCODE_UPDATED;

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
860 | ||
/*
 * Parse a stream of concatenated microcode blobs from @iter, keep the newest
 * one applicable to @cpu in uci->mc, and stash it for early loading.
 *
 * Returns UCODE_NEW if a newer patch was found, UCODE_OK if the stream was
 * valid but held nothing newer, UCODE_NFOUND/UCODE_ERROR otherwise.
 */
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	enum ucode_state ret = UCODE_OK;
	int new_rev = uci->cpu_sig.rev;
	u8 *new_mc = NULL, *mc = NULL;
	unsigned int csig, cpf;

	while (iov_iter_count(iter)) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size, data_size;
		u8 *data;

		if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
			pr_err("error! Truncated or inaccessible header in microcode data file\n");
			break;
		}

		mc_size = get_totalsize(&mc_header);
		if (mc_size < sizeof(mc_header)) {
			pr_err("error! Bad data in microcode data file (totalsize too small)\n");
			break;
		}
		data_size = mc_size - sizeof(mc_header);
		if (data_size > iov_iter_count(iter)) {
			pr_err("error! Bad data in microcode data file (truncated file?)\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		/* Reassemble header + payload so the blob can be validated. */
		memcpy(mc, &mc_header, sizeof(mc_header));
		data = mc + sizeof(mc_header);
		if (!copy_from_iter_full(data, data_size, iter) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* New best candidate: take ownership of this buffer. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
			ret = UCODE_NEW;
		}
	}

	vfree(mc);

	/* Leftover input means the loop broke out on an error above. */
	if (iov_iter_count(iter)) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return ret;
}
944 | ||
723f2828 BP |
945 | static bool is_blacklisted(unsigned int cpu) |
946 | { | |
947 | struct cpuinfo_x86 *c = &cpu_data(cpu); | |
948 | ||
b94b7373 JZ |
949 | /* |
950 | * Late loading on model 79 with microcode revision less than 0x0b000021 | |
7e702d17 JZ |
951 | * and LLC size per core bigger than 2.5MB may result in a system hang. |
952 | * This behavior is documented in item BDF90, #334165 (Intel Xeon | |
953 | * Processor E7-8800/4800 v4 Product Family). | |
b94b7373 JZ |
954 | */ |
955 | if (c->x86 == 6 && | |
956 | c->x86_model == INTEL_FAM6_BROADWELL_X && | |
b399151c | 957 | c->x86_stepping == 0x01 && |
7e702d17 | 958 | llc_size_per_core > 2621440 && |
b94b7373 JZ |
959 | c->microcode < 0x0b000021) { |
960 | pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); | |
961 | pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); | |
723f2828 BP |
962 | return true; |
963 | } | |
964 | ||
965 | return false; | |
966 | } | |
967 | ||
48e30685 BP |
968 | static enum ucode_state request_microcode_fw(int cpu, struct device *device, |
969 | bool refresh_fw) | |
a30a6a2c | 970 | { |
92cb7612 | 971 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
a30a6a2c | 972 | const struct firmware *firmware; |
7e94a7b6 | 973 | struct iov_iter iter; |
871b72dd | 974 | enum ucode_state ret; |
7e94a7b6 JH |
975 | struct kvec kvec; |
976 | char name[30]; | |
a30a6a2c | 977 | |
723f2828 BP |
978 | if (is_blacklisted(cpu)) |
979 | return UCODE_NFOUND; | |
980 | ||
3e135d88 | 981 | sprintf(name, "intel-ucode/%02x-%02x-%02x", |
b399151c | 982 | c->x86, c->x86_model, c->x86_stepping); |
871b72dd | 983 | |
75da02b2 | 984 | if (request_firmware_direct(&firmware, name, device)) { |
f58e1f53 | 985 | pr_debug("data file %s load failed\n", name); |
871b72dd | 986 | return UCODE_NFOUND; |
a30a6a2c | 987 | } |
a0a29b62 | 988 | |
7e94a7b6 JH |
989 | kvec.iov_base = (void *)firmware->data; |
990 | kvec.iov_len = firmware->size; | |
991 | iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size); | |
992 | ret = generic_load_microcode(cpu, &iter); | |
a0a29b62 | 993 | |
a30a6a2c SL |
994 | release_firmware(firmware); |
995 | ||
a0a29b62 DA |
996 | return ret; |
997 | } | |
998 | ||
871b72dd DA |
999 | static enum ucode_state |
1000 | request_microcode_user(int cpu, const void __user *buf, size_t size) | |
a0a29b62 | 1001 | { |
7e94a7b6 JH |
1002 | struct iov_iter iter; |
1003 | struct iovec iov; | |
1004 | ||
723f2828 BP |
1005 | if (is_blacklisted(cpu)) |
1006 | return UCODE_NFOUND; | |
1007 | ||
7e94a7b6 JH |
1008 | iov.iov_base = (void __user *)buf; |
1009 | iov.iov_len = size; | |
1010 | iov_iter_init(&iter, WRITE, &iov, 1, size); | |
1011 | ||
1012 | return generic_load_microcode(cpu, &iter); | |
a30a6a2c SL |
1013 | } |
1014 | ||
/* Vendor hooks registered with the core microcode driver for Intel CPUs. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user	  = request_microcode_user,	/* blob from user space */
	.request_microcode_fw	  = request_microcode_fw,	/* blob via firmware loader */
	.collect_cpu_info	  = collect_cpu_info,
	.apply_microcode	  = apply_microcode_intel,
};
1021 | ||
/*
 * Compute the last-level-cache size per core in bytes, used by
 * is_blacklisted() for the BDF90 erratum check.
 *
 * x86_cache_size is in KB, hence the * 1024.  do_div() is used (it divides
 * the u64 in place) instead of a plain 64-bit '/' so 32-bit builds don't
 * need a libgcc division helper.
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}
1030 | ||
18dbc916 | 1031 | struct microcode_ops * __init init_intel_microcode(void) |
8d86f390 | 1032 | { |
9a2bc335 | 1033 | struct cpuinfo_x86 *c = &boot_cpu_data; |
7164b3f5 SB |
1034 | |
1035 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | |
1036 | cpu_has(c, X86_FEATURE_IA64)) { | |
1037 | pr_err("Intel CPU family 0x%x not supported\n", c->x86); | |
1038 | return NULL; | |
1039 | } | |
1040 | ||
7e702d17 JZ |
1041 | llc_size_per_core = calc_llc_size_per_core(c); |
1042 | ||
18dbc916 | 1043 | return µcode_intel_ops; |
8d86f390 | 1044 | } |