/*
 * AMD CPU detection and vendor-specific setup (x86).
 */
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "cpu.h"

9 | /* | |
10 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause | |
11 | * misexecution of code under Linux. Owners of such processors should | |
12 | * contact AMD for precise details and a CPU swap. | |
13 | * | |
14 | * See http://www.multimania.com/poulot/k6bug.html | |
15 | * http://www.amd.com/K6/k6docs/revgd.html | |
16 | * | |
17 | * The following test is erm.. interesting. AMD neglected to up | |
18 | * the chip setting when fixing the bug but they also tweaked some | |
19 | * performance at the same time.. | |
20 | */ | |
21 | ||
22 | extern void vide(void); | |
23 | __asm__(".align 4\nvide: ret"); | |
24 | ||
25 | static void __init init_amd(struct cpuinfo_x86 *c) | |
26 | { | |
27 | u32 l, h; | |
28 | int mbytes = num_physpages >> (20-PAGE_SHIFT); | |
29 | int r; | |
30 | ||
31 | /* | |
32 | * FIXME: We should handle the K5 here. Set up the write | |
33 | * range and also turn on MSR 83 bits 4 and 31 (write alloc, | |
34 | * no bus pipeline) | |
35 | */ | |
36 | ||
37 | /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; | |
38 | 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */ | |
39 | clear_bit(0*32+31, c->x86_capability); | |
40 | ||
41 | r = get_model_name(c); | |
42 | ||
43 | switch(c->x86) | |
44 | { | |
45 | case 4: | |
46 | /* | |
47 | * General Systems BIOSen alias the cpu frequency registers | |
48 | * of the Elan at 0x000df000. Unfortuantly, one of the Linux | |
49 | * drivers subsequently pokes it, and changes the CPU speed. | |
50 | * Workaround : Remove the unneeded alias. | |
51 | */ | |
52 | #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */ | |
53 | #define CBAR_ENB (0x80000000) | |
54 | #define CBAR_KEY (0X000000CB) | |
55 | if (c->x86_model==9 || c->x86_model == 10) { | |
56 | if (inl (CBAR) & CBAR_ENB) | |
57 | outl (0 | CBAR_KEY, CBAR); | |
58 | } | |
59 | break; | |
60 | case 5: | |
61 | if( c->x86_model < 6 ) | |
62 | { | |
63 | /* Based on AMD doc 20734R - June 2000 */ | |
64 | if ( c->x86_model == 0 ) { | |
65 | clear_bit(X86_FEATURE_APIC, c->x86_capability); | |
66 | set_bit(X86_FEATURE_PGE, c->x86_capability); | |
67 | } | |
68 | break; | |
69 | } | |
70 | ||
71 | if ( c->x86_model == 6 && c->x86_mask == 1 ) { | |
72 | const int K6_BUG_LOOP = 1000000; | |
73 | int n; | |
74 | void (*f_vide)(void); | |
75 | unsigned long d, d2; | |
76 | ||
77 | printk(KERN_INFO "AMD K6 stepping B detected - "); | |
78 | ||
79 | /* | |
80 | * It looks like AMD fixed the 2.6.2 bug and improved indirect | |
81 | * calls at the same time. | |
82 | */ | |
83 | ||
84 | n = K6_BUG_LOOP; | |
85 | f_vide = vide; | |
86 | rdtscl(d); | |
87 | while (n--) | |
88 | f_vide(); | |
89 | rdtscl(d2); | |
90 | d = d2-d; | |
91 | ||
92 | /* Knock these two lines out if it debugs out ok */ | |
93 | printk(KERN_INFO "AMD K6 stepping B detected - "); | |
94 | /* -- cut here -- */ | |
95 | if (d > 20*K6_BUG_LOOP) | |
96 | printk("system stability may be impaired when more than 32 MB are used.\n"); | |
97 | else | |
98 | printk("probably OK (after B9730xxxx).\n"); | |
99 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | |
100 | } | |
101 | ||
102 | /* K6 with old style WHCR */ | |
103 | if (c->x86_model < 8 || | |
104 | (c->x86_model== 8 && c->x86_mask < 8)) { | |
105 | /* We can only write allocate on the low 508Mb */ | |
106 | if(mbytes>508) | |
107 | mbytes=508; | |
108 | ||
109 | rdmsr(MSR_K6_WHCR, l, h); | |
110 | if ((l&0x0000FFFF)==0) { | |
111 | unsigned long flags; | |
112 | l=(1<<0)|((mbytes/4)<<1); | |
113 | local_irq_save(flags); | |
114 | wbinvd(); | |
115 | wrmsr(MSR_K6_WHCR, l, h); | |
116 | local_irq_restore(flags); | |
117 | printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", | |
118 | mbytes); | |
119 | } | |
120 | break; | |
121 | } | |
122 | ||
123 | if ((c->x86_model == 8 && c->x86_mask >7) || | |
124 | c->x86_model == 9 || c->x86_model == 13) { | |
125 | /* The more serious chips .. */ | |
126 | ||
127 | if(mbytes>4092) | |
128 | mbytes=4092; | |
129 | ||
130 | rdmsr(MSR_K6_WHCR, l, h); | |
131 | if ((l&0xFFFF0000)==0) { | |
132 | unsigned long flags; | |
133 | l=((mbytes>>2)<<22)|(1<<16); | |
134 | local_irq_save(flags); | |
135 | wbinvd(); | |
136 | wrmsr(MSR_K6_WHCR, l, h); | |
137 | local_irq_restore(flags); | |
138 | printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", | |
139 | mbytes); | |
140 | } | |
141 | ||
142 | /* Set MTRR capability flag if appropriate */ | |
143 | if (c->x86_model == 13 || c->x86_model == 9 || | |
144 | (c->x86_model == 8 && c->x86_mask >= 8)) | |
145 | set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); | |
146 | break; | |
147 | } | |
148 | break; | |
149 | ||
150 | case 6: /* An Athlon/Duron */ | |
151 | ||
152 | /* Bit 15 of Athlon specific MSR 15, needs to be 0 | |
153 | * to enable SSE on Palomino/Morgan/Barton CPU's. | |
154 | * If the BIOS didn't enable it already, enable it here. | |
155 | */ | |
156 | if (c->x86_model >= 6 && c->x86_model <= 10) { | |
157 | if (!cpu_has(c, X86_FEATURE_XMM)) { | |
158 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | |
159 | rdmsr(MSR_K7_HWCR, l, h); | |
160 | l &= ~0x00008000; | |
161 | wrmsr(MSR_K7_HWCR, l, h); | |
162 | set_bit(X86_FEATURE_XMM, c->x86_capability); | |
163 | } | |
164 | } | |
165 | ||
166 | /* It's been determined by AMD that Athlons since model 8 stepping 1 | |
167 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | |
168 | * As per AMD technical note 27212 0.2 | |
169 | */ | |
170 | if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { | |
171 | rdmsr(MSR_K7_CLK_CTL, l, h); | |
172 | if ((l & 0xfff00000) != 0x20000000) { | |
173 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | |
174 | ((l & 0x000fffff)|0x20000000)); | |
175 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | |
176 | } | |
177 | } | |
178 | break; | |
179 | } | |
180 | ||
181 | switch (c->x86) { | |
182 | case 15: | |
183 | set_bit(X86_FEATURE_K8, c->x86_capability); | |
184 | break; | |
185 | case 6: | |
186 | set_bit(X86_FEATURE_K7, c->x86_capability); | |
187 | break; | |
188 | } | |
189 | ||
190 | display_cacheinfo(c); | |
191 | detect_ht(c); | |
192 | ||
193 | #ifdef CONFIG_X86_HT | |
194 | /* AMD dual core looks like HT but isn't really. Hide it from the | |
195 | scheduler. This works around problems with the domain scheduler. | |
196 | Also probably gives slightly better scheduling and disables | |
197 | SMT nice which is harmful on dual core. | |
198 | TBD tune the domain scheduler for dual core. */ | |
199 | if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) | |
200 | smp_num_siblings = 1; | |
201 | #endif | |
202 | ||
203 | if (cpuid_eax(0x80000000) >= 0x80000008) { | |
204 | c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; | |
205 | if (c->x86_num_cores & (c->x86_num_cores - 1)) | |
206 | c->x86_num_cores = 1; | |
207 | } | |
208 | } | |
209 | ||
210 | static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) | |
211 | { | |
212 | /* AMD errata T13 (order #21922) */ | |
213 | if ((c->x86 == 6)) { | |
214 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ | |
215 | size = 64; | |
216 | if (c->x86_model == 4 && | |
217 | (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */ | |
218 | size = 256; | |
219 | } | |
220 | return size; | |
221 | } | |
222 | ||
223 | static struct cpu_dev amd_cpu_dev __initdata = { | |
224 | .c_vendor = "AMD", | |
225 | .c_ident = { "AuthenticAMD" }, | |
226 | .c_models = { | |
227 | { .vendor = X86_VENDOR_AMD, .family = 4, .model_names = | |
228 | { | |
229 | [3] = "486 DX/2", | |
230 | [7] = "486 DX/2-WB", | |
231 | [8] = "486 DX/4", | |
232 | [9] = "486 DX/4-WB", | |
233 | [14] = "Am5x86-WT", | |
234 | [15] = "Am5x86-WB" | |
235 | } | |
236 | }, | |
237 | }, | |
238 | .c_init = init_amd, | |
239 | .c_identify = generic_identify, | |
240 | .c_size_cache = amd_size_cache, | |
241 | }; | |
242 | ||
243 | int __init amd_init_cpu(void) | |
244 | { | |
245 | cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev; | |
246 | return 0; | |
247 | } | |
248 | ||
//early_arch_initcall(amd_init_cpu);