/* arch/i386/kernel/cpu/centaur.c */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include "cpu.h"

#ifdef CONFIG_X86_OOSTORE

/* Return the largest power of two that is <= x (0 when x == 0). */
static u32 __init power2(u32 x)
{
        u32 s=1;
        while(s<=x)
                s<<=1;
        return s>>=1;
}


/*
 * Set up an actual MCR
 */

static void __init centaur_mcr_insert(int reg, u32 base, u32 size, int key)
{
        u32 lo, hi;

        hi = base & ~0xFFF;
        lo = ~(size-1);         /* Size is a power of 2 so this makes a mask */
        lo &= ~0xFFF;           /* Remove the ctrl value bits */
        lo |= key;              /* Attribute we wish to set */
        wrmsr(reg+MSR_IDT_MCR0, lo, hi);
        mtrr_centaur_report_mcr(reg, lo, hi);   /* Tell the mtrr driver */
}
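
/*
 * Illustration (added for clarity; not part of the original code): for an
 * example 32MB region based at 64MB with key 31, the values written above
 * would be hi = 0x04000000 (the 4K-aligned physical base) and
 * lo = (~(0x02000000 - 1) & ~0xFFF) | 31 = 0xFE00001F, i.e. the size mask
 * with the attribute key in the low control bits.
 */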

/*
 * Figure what we can cover with MCR's
 *
 * Shortcut: We know you can't put 4Gig of RAM on a winchip
 */

static u32 __init ramtop(void) /* 16388 */
{
        int i;
        u32 top = 0;
        u32 clip = 0xFFFFFFFFUL;

        for (i = 0; i < e820.nr_map; i++) {
                unsigned long start, end;

                if (e820.map[i].addr > 0xFFFFFFFFUL)
                        continue;
                /*
                 * Don't MCR over reserved space. Ignore the ISA hole -
                 * we frob around that catastrophe already.
                 */

                if (e820.map[i].type == E820_RESERVED)
                {
                        if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
                                clip = e820.map[i].addr;
                        continue;
                }
                start = e820.map[i].addr;
                end = e820.map[i].addr + e820.map[i].size;
                if (start >= end)
                        continue;
                if (end > top)
                        top = end;
        }
        /* Everything below 'top' should be RAM except for the ISA hole.
           Because of the limited MCR's we want to map NV/ACPI into our
           MCR range for gunk in RAM.

           Clip might cause us to MCR insufficient RAM but that is an
           acceptable failure mode and should only bite obscure boxes with
           a VESA hole at 15MB.

           The second case where clip sometimes kicks in is when the EBDA is
           marked as reserved. Again we fail safe with reasonable results.
        */

        if(top>clip)
                top=clip;

        return top;
}

/*
 * Compute a set of MCR's to give maximum coverage
 */

static int __init centaur_mcr_compute(int nr, int key)
{
        u32 mem = ramtop();
        u32 root = power2(mem);
        u32 base = root;
        u32 top = root;
        u32 floor = 0;
        int ct = 0;

        while(ct<nr)
        {
                u32 fspace = 0;

                /*
                 * Find the largest block we will fill going upwards
                 */

                u32 high = power2(mem-top);

                /*
                 * Find the largest block we will fill going downwards
                 */

                u32 low = base/2;

                /*
                 * Don't fill below 1Mb going downwards as there
                 * is an ISA hole in the way.
                 */

                if(base <= 1024*1024)
                        low = 0;

                /*
                 * See how much space we could cover by filling below
                 * the ISA hole
                 */

                if(floor == 0)
                        fspace = 512*1024;
                else if(floor == 512*1024)
                        fspace = 128*1024;

                /* And forget ROM space */

                /*
                 * Now install the largest coverage we get
                 */

                if(fspace > high && fspace > low)
                {
                        centaur_mcr_insert(ct, floor, fspace, key);
                        floor += fspace;
                }
                else if(high > low)
                {
                        centaur_mcr_insert(ct, top, high, key);
                        top += high;
                }
                else if(low > 0)
                {
                        base -= low;
                        centaur_mcr_insert(ct, base, low, key);
                }
                else break;
                ct++;
        }
        /*
         * We loaded ct values. We now need to set the mask. The caller
         * must do this bit.
         */

        return ct;
}
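
/*
 * Worked example (added for clarity; not part of the original code):
 * assuming ramtop() reports 96MB and nr = 6, root is 64MB and the loop
 * above ends up covering 32-64MB, 64-96MB, 16-32MB, 8-16MB, 4-8MB and
 * 2-4MB, leaving only the bottom 2MB (including the ISA hole) unmapped
 * when the registers run out.
 */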

static void __init centaur_create_optimal_mcr(void)
{
        int i;
        /*
         * Allocate up to 6 mcrs to mark as much of ram as possible
         * as write combining and weak write ordered.
         *
         * To experiment with: Linux never uses stack operations for
         * mmio spaces so we could globally enable stack operation wc
         *
         * Load the registers with type 31 - full write combining, all
         * writes weakly ordered.
         */
        int used = centaur_mcr_compute(6, 31);

        /*
         * Wipe unused MCRs
         */

        for(i=used;i<8;i++)
                wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

static void __init winchip2_create_optimal_mcr(void)
{
        u32 lo, hi;
        int i;

        /*
         * Allocate up to 6 mcrs to mark as much of ram as possible
         * as write combining, weak store ordered.
         *
         * Load the registers with type 25
         *      8       -       weak write ordering
         *      16      -       weak read ordering
         *      1       -       write combining
         */

        int used = centaur_mcr_compute(6, 25);

        /*
         * Mark the registers we are using.
         */

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        for(i=0;i<used;i++)
                lo|=1<<(9+i);
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);

        /*
         * Wipe unused MCRs
         */

        for(i=used;i<8;i++)
                wrmsr(MSR_IDT_MCR0+i, 0, 0);
}

/*
 * Handle the MCR key on the Winchip 2.
 */

static void __init winchip2_unprotect_mcr(void)
{
        u32 lo, hi;
        u32 key;

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        lo&=~0x1C0;             /* blank bits 8-6 */
        key = (lo>>17) & 7;
        lo |= key<<6;           /* replace with unlock key */
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}

static void __init winchip2_protect_mcr(void)
{
        u32 lo, hi;

        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
        lo&=~0x1C0;             /* blank bits 8-6 */
        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
}
#endif /* CONFIG_X86_OOSTORE */

#define ACE_PRESENT     (1 << 6)
#define ACE_ENABLED     (1 << 7)
#define ACE_FCR         (1 << 28)       /* MSR_VIA_FCR */

#define RNG_PRESENT     (1 << 2)
#define RNG_ENABLED     (1 << 3)
#define RNG_ENABLE      (1 << 6)        /* MSR_VIA_RNG */

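/*
 * Note (added for clarity): in the checks below a unit is switched on only
 * when its PRESENT bit is set while its ENABLED bit is still clear, i.e.
 * (tmp & (PRESENT | ENABLED)) == PRESENT.
 */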
static void __init init_c3(struct cpuinfo_x86 *c)
{
        u32 lo, hi;

        /* Test for Centaur Extended Feature Flags presence */
        if (cpuid_eax(0xC0000000) >= 0xC0000001) {
                u32 tmp = cpuid_edx(0xC0000001);

                /* enable ACE unit, if present and disabled */
                if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
                        rdmsr (MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR;          /* enable ACE unit */
                        wrmsr (MSR_VIA_FCR, lo, hi);
                        printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
                }

                /* enable RNG unit, if present and disabled */
                if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
                        rdmsr (MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE;       /* enable RNG unit */
                        wrmsr (MSR_VIA_RNG, lo, hi);
                        printk(KERN_INFO "CPU: Enabled h/w RNG\n");
                }

                /* store Centaur Extended Feature Flags as
                 * word 5 of the CPU capability bit array
                 */
                c->x86_capability[5] = cpuid_edx(0xC0000001);
        }

        /* Cyrix III family needs CX8 & PGE explicitly enabled. */
        if (c->x86_model >= 6 && c->x86_model <= 9) {
                rdmsr (MSR_VIA_FCR, lo, hi);
                lo |= (1<<1 | 1<<7);
                wrmsr (MSR_VIA_FCR, lo, hi);
                set_bit(X86_FEATURE_CX8, c->x86_capability);
        }

        /* Before Nehemiah, the C3's had 3DNow! */
        if (c->x86_model >= 6 && c->x86_model < 9)
                set_bit(X86_FEATURE_3DNOW, c->x86_capability);

        get_model_name(c);
        display_cacheinfo(c);
}

static void __init init_centaur(struct cpuinfo_x86 *c)
{
        enum {
                ECX8=1<<1,
                EIERRINT=1<<2,
                DPM=1<<3,
                DMCE=1<<4,
                DSTPCLK=1<<5,
                ELINEAR=1<<6,
                DSMC=1<<7,
                DTLOCK=1<<8,
                EDCTLB=1<<8,
                EMMX=1<<9,
                DPDC=1<<11,
                EBRPRED=1<<12,
                DIC=1<<13,
                DDC=1<<14,
                DNA=1<<15,
                ERETSTK=1<<16,
                E2MMX=1<<19,
                EAMD3D=1<<20,
        };

        char *name;
        u32 fcr_set=0;
        u32 fcr_clr=0;
        u32 lo,hi,newlo;
        u32 aa,bb,cc,dd;

        /* Bit 31 in normal CPUID used for nonstandard 3DNow! ID;
           3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);

        switch (c->x86) {

        case 5:
                switch(c->x86_model) {
                case 4:
                        name="C6";
                        fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                        fcr_clr=DPDC;
                        printk(KERN_NOTICE "Disabling bugged TSC.\n");
                        clear_bit(X86_FEATURE_TSC, c->x86_capability);
#ifdef CONFIG_X86_OOSTORE
                        centaur_create_optimal_mcr();
                        /* Enable
                           write combining on non-stack, non-string
                           write combining on string, all types
                           weak write ordering

                           The C6 original lacks weak read order

                           Note 0x120 is write only on Winchip 1 */

                        wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
#endif
                        break;
                case 8:
                        switch(c->x86_mask) {
                        default:
                                name="2";
                                break;
                        case 7 ... 9:
                                name="2A";
                                break;
                        case 10 ... 15:
                                name="2B";
                                break;
                        }
                        fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
                        fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
                        winchip2_unprotect_mcr();
                        winchip2_create_optimal_mcr();
                        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
                        /* Enable
                           write combining on non-stack, non-string
                           write combining on string, all types
                           weak write ordering
                        */
                        lo|=31;
                        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
                        winchip2_protect_mcr();
#endif
                        break;
                case 9:
                        name="3";
                        fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
                        fcr_clr=DPDC;
#ifdef CONFIG_X86_OOSTORE
                        winchip2_unprotect_mcr();
                        winchip2_create_optimal_mcr();
                        rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
                        /* Enable
                           write combining on non-stack, non-string
                           write combining on string, all types
                           weak write ordering
                        */
                        lo|=31;
                        wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
                        winchip2_protect_mcr();
#endif
                        break;
                default:
                        name="??";
                }

                rdmsr(MSR_IDT_FCR1, lo, hi);
                newlo=(lo|fcr_set) & (~fcr_clr);

                if (newlo!=lo) {
                        printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
                        wrmsr(MSR_IDT_FCR1, newlo, hi );
                } else {
                        printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
                }
                /* Emulate MTRRs using Centaur's MCR. */
                set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
                /* Report CX8 */
                set_bit(X86_FEATURE_CX8, c->x86_capability);
                /* Set 3DNow! on Winchip 2 and above. */
                if (c->x86_model >= 8)
                        set_bit(X86_FEATURE_3DNOW, c->x86_capability);
                /* See if we can find out some more. */
                if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
                        /* Yes, we can. */
                        cpuid(0x80000005,&aa,&bb,&cc,&dd);
                        /* Add L1 data and code cache sizes. */
                        c->x86_cache_size = (cc>>24)+(dd>>24);
                }
                sprintf( c->x86_model_id, "WinChip %s", name );
                break;

        case 6:
                init_c3(c);
                break;
        }
}
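
/*
 * Illustrative arithmetic (added for clarity; not part of the original
 * code): for a WinChip 2 (family 5, model 8) the switch in init_centaur()
 * leaves fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D
 * = 0x00191382 and fcr_clr = DPDC = 0x00000800, so the FCR update above
 * amounts to newlo = (lo | 0x00191382) & ~0x00000800.
 */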

static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
{
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
                size >>= 8;

        /* VIA also screwed up Nehemiah stepping 1, and made
           it return '65KB' instead of '64KB'
           - Note, it seems this may only be in engineering samples. */
        if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
                size -= 1;

        return size;
}

static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_init         = init_centaur,
        .c_size_cache   = centaur_size_cache,
};

int __init centaur_init_cpu(void)
{
        cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
        return 0;
}

//early_arch_initcall(centaur_init_cpu);

static int __init centaur_exit_cpu(void)
{
        cpu_devs[X86_VENDOR_CENTAUR] = NULL;
        return 0;
}

late_initcall(centaur_exit_cpu);