#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm... interesting. AMD neglected to bump
 * the reported stepping when fixing the bug, but they also tweaked
 * some performance at the same time...
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
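
/*
 * Note: vide is deliberately an empty function reached only through a
 * function pointer. Since the fixed parts sped up indirect calls (see
 * the comment in init_amd_k6() below), timing a loop of indirect calls
 * to it distinguishes fixed from buggy parts even though the reported
 * stepping did not change.
 */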

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
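	/*
	 * Assumption on the CBAR semantics: with CBAR_ENB (bit 31) set the
	 * alias window is active; writing the magic key value back with the
	 * enable bit clear (0 | CBAR_KEY below) disables the alias.
	 */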
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}
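
	/*
	 * Background (assumed from the AMD K6 docs): WHCR is the Write
	 * Handling Control Register, which bounds the write-allocate
	 * region. In the old layout the limit field holds the top of
	 * memory in 4 MB units, so a 7-bit field caps it at
	 * 127 * 4 MB = 508 MB; the new layout widens the field to
	 * 10 bits, i.e. 1023 * 4 MB = 4092 MB.
	 */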

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	    c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Are we being called from identify_secondary_cpu()? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		  " processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR C001_0015 (MSR_K7_HWCR) needs
	 * to be 0 to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
			       l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish
 * the cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;

	bits = c->x86_coreid_bits;

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
#endif
}
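
/*
 * Worked example (hypothetical values): with x86_coreid_bits == 2 and an
 * initial APIC ID of 5, cpu_core_id = 5 & 3 = 1 and phys_proc_id =
 * 5 >> 2 = 1, i.e. core 1 in socket 1.
 */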

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = hard_smp_processor_id();

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for
		 *   the previous case.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* Is the CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}
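
	/*
	 * Example (hypothetical values): a 6-core part reports
	 * ecx[7:0] == 5, so x86_max_cores == 6; if ecx[15:12] is zero the
	 * loop above yields bits == 3, the smallest shift with 1 << 3 >= 6.
	 */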

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
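	/*
	 * Note (assumption): cpuid_eax(1) returns the raw
	 * family/model/stepping signature, so the 0x0f48-0x0f4f and
	 * 0x0f58+ ranges below select revision C and later K8 parts.
	 */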
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

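	/*
	 * Assumed background: the "FXSAVE leak" is the AMD erratum whereby
	 * FXSAVE/FXRSTOR do not save/restore the x87 FIP/FDP/FOP pointers
	 * unless an unmasked exception is pending, so stale values can leak
	 * between tasks. The flag set below tells the FPU context-switch
	 * code to apply a workaround.
	 */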
	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/*
			 * Should distinguish models here, but this is
			 * only a fallback anyway.
			 */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	display_cacheinfo(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

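	/*
	 * Note (assumption): EDX of CPUID 0x80000006 describes the L3
	 * cache, with bits 15:12 holding the L3 associativity; a nonzero
	 * field means an L3 is present, hence four cache leaves instead
	 * of three.
	 */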
	if (c->extended_cpuid_level >= 0x80000006) {
		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf && c->x86 <= 0x11)
		set_cpu_cap(c, X86_FEATURE_K8);

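	/*
	 * Note (assumption): RDTSC is not a serializing instruction and may
	 * execute ahead of earlier instructions; on AMD parts an MFENCE
	 * (available with SSE2) is what fences it, hence the flag below.
	 */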
	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

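	/*
	 * Note (assumption): the condition below checks whether TSEG falls
	 * inside a region the kernel mapped with large pages: either below
	 * max_low_pfn_mapped, or above 4 GB (1ULL << (32 - PMD_SHIFT) in
	 * PMD units) and below max_pfn_mapped. Only then does the mapping
	 * need to be split into 4k pages around the SMM area.
	 */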
	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
			    (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
			    ((tseg>>PMD_SHIFT) <
			     (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
			     (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);