#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

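/*
 * Note (added for clarity): the two helpers below go through
 * rdmsr_safe_regs()/wrmsr_safe_regs(), which take a full GPR set, because
 * a handful of K8 MSRs (e.g. 0xc001100d used in init_amd_k8()) are
 * password protected: the access only succeeds when EDI (gprs[7]) holds
 * the magic value 0x9c5a203a. A plain rdmsrl()/wrmsrl() cannot set up
 * EDI, hence the dedicated wrappers and the K8-only WARN_ONCE().
 */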
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* Is this call coming from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
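/*
 * Illustration (values illustrative, layout per the BKDG referenced above):
 * on a two-node Fam15h part, CPUID Fn8000_001E returns ECX[10:8] = 1
 * (i.e. nodes_per_socket = 2) and ECX[7:0] = the node this CPU lives on,
 * while EBX[7:0] is the compute unit id and EBX[9:8] + 1 the number of
 * cores sharing that compute unit. amd_get_topology() below only decodes
 * these fields.
 */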
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes_per_socket;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
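/*
 * Worked example (illustrative values): with x86_coreid_bits == 2 and an
 * initial APIC id of 0b1101, the core id is 0b01 (= 1) and the socket id
 * (phys_proc_id) is 0b11 (= 3); i.e. the low 'bits' bits select the core,
 * the remaining high bits select the package.
 */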
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

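/*
 * Note on the Fam15h va_align setup in bsp_init_amd() (illustrative
 * numbers): CPUID Fn8000_0005 EDX reports the L1 instruction cache, with
 * the size in KB in bits 31:24 and the associativity in bits 23:16. For a
 * 64K, 2-way L1I that gives upperbit = (64 << 10) / 2 = 32768, so
 * va_align.mask ends up as 0x7000 and bits [14:12] of mmap addresses get
 * a per-boot random value, avoiding the aliasing penalty in the shared L1I.
 */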
static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (cpu_has_apic && c->x86 > 0x16) {
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	} else if (cpu_has_apic && c->x86 >= 0xf) {
		/* check CPU config space for extended APIC ID */
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

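/*
 * Rough guide to the CPUID leaves used below (see the APM/BKDG for the
 * authoritative layout): Fn8000_0006 EBX[27:16]/EBX[11:0] give the L2
 * DTLB/ITLB entry counts for 4K pages and EAX has the same layout for
 * 2M/4M pages; Fn8000_0005 reports the L1 TLBs, which is what K8 (and a
 * disabled L2 TLB) falls back to.
 */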
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

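/*
 * Worked example of the encoding above: AMD_MODEL_RANGE(0x10, 0x2, 0x1,
 * 0xff, 0xf) packs to 0x10021fff, i.e. family 0x10, start (model 0x2,
 * stepping 0x1) = 0x021, end (model 0xff, stepping 0xf) = 0xfff.
 * cpu_has_amd_erratum() compares (model << 4 | stepping) against the
 * start/end fields when no usable OSVW id is available.
 */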
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

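/*
 * OSVW (OS Visible Workaround) in a nutshell, as used below: the number of
 * valid ids is read from MSR_AMD64_OSVW_ID_LENGTH and the status bit for a
 * given id lives in one of the 64-bit MSR_AMD64_OSVW_STATUS registers
 * (hence the "osvw_id >> 6" index and the "osvw_id & 0x3f" bit position).
 * A set bit means the erratum applies; only when OSVW cannot be used does
 * the legacy family/model/stepping table get consulted.
 */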
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

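/*
 * The breakpoint address-mask MSRs are not contiguous with DR0's: DR0's mask
 * lives in MSR_F16H_DR0_ADDR_MASK, while the masks for DR1-DR3 occupy three
 * consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK, which is why the
 * switch below computes "MSR_F16H_DR1_ADDR_MASK - 1 + dr" for dr = 1..3.
 */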
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!cpu_has_bpext)
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}