i386: move kernel/cpu
author    Thomas Gleixner <tglx@linutronix.de>
          Thu, 11 Oct 2007 09:16:58 +0000 (11:16 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 11 Oct 2007 09:16:58 +0000 (11:16 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
32 files changed:
arch/i386/kernel/Makefile_32
arch/i386/kernel/cpu/Makefile [deleted file]
arch/i386/kernel/cpu/addon_cpuid_features.c [deleted file]
arch/i386/kernel/cpu/amd.c [deleted file]
arch/i386/kernel/cpu/bugs.c [deleted file]
arch/i386/kernel/cpu/centaur.c [deleted file]
arch/i386/kernel/cpu/common.c [deleted file]
arch/i386/kernel/cpu/cpu.h [deleted file]
arch/i386/kernel/cpu/cyrix.c [deleted file]
arch/i386/kernel/cpu/intel.c [deleted file]
arch/i386/kernel/cpu/intel_cacheinfo.c [deleted file]
arch/i386/kernel/cpu/nexgen.c [deleted file]
arch/i386/kernel/cpu/perfctr-watchdog.c [deleted file]
arch/i386/kernel/cpu/proc.c [deleted file]
arch/i386/kernel/cpu/transmeta.c [deleted file]
arch/i386/kernel/cpu/umc.c [deleted file]
arch/x86/kernel/cpu/Makefile [new file with mode: 0644]
arch/x86/kernel/cpu/addon_cpuid_features.c [new file with mode: 0644]
arch/x86/kernel/cpu/amd.c [new file with mode: 0644]
arch/x86/kernel/cpu/bugs.c [new file with mode: 0644]
arch/x86/kernel/cpu/centaur.c [new file with mode: 0644]
arch/x86/kernel/cpu/common.c [new file with mode: 0644]
arch/x86/kernel/cpu/cpu.h [new file with mode: 0644]
arch/x86/kernel/cpu/cyrix.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_cacheinfo.c [new file with mode: 0644]
arch/x86/kernel/cpu/nexgen.c [new file with mode: 0644]
arch/x86/kernel/cpu/perfctr-watchdog.c [new file with mode: 0644]
arch/x86/kernel/cpu/proc.c [new file with mode: 0644]
arch/x86/kernel/cpu/transmeta.c [new file with mode: 0644]
arch/x86/kernel/cpu/umc.c [new file with mode: 0644]
arch/x86_64/kernel/Makefile_64

diff --git a/arch/i386/kernel/Makefile_32 b/arch/i386/kernel/Makefile_32
index af8304b921de95176419378263efce3092e1ae4c..5096f486d389592378e1b6c2d2742c0e19c82393 100644
--- a/arch/i386/kernel/Makefile_32
+++ b/arch/i386/kernel/Makefile_32
@@ -10,7 +10,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
                quirks.o i8237.o topology.o alternative.o i8253_32.o tsc_32.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
-obj-y                          += cpu/
+obj-y                          += ../../x86/kernel/cpu/
 obj-y                          += ../../x86/kernel/acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)  += reboot_32.o
 obj-$(CONFIG_MCA)              += mca_32.o
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
deleted file mode 100644
index 6687f6d..0000000
--- a/arch/i386/kernel/cpu/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Makefile for x86-compatible CPU details and quirks
-#
-
-obj-y  :=      common.o proc.o bugs.o
-
-obj-y  +=      amd.o
-obj-y  +=      cyrix.o
-obj-y  +=      centaur.o
-obj-y  +=      transmeta.o
-obj-y  +=      intel.o intel_cacheinfo.o addon_cpuid_features.o
-obj-y  +=      nexgen.o
-obj-y  +=      umc.o
-
-obj-$(CONFIG_X86_MCE)  +=      ../../../x86/kernel/cpu/mcheck/
-
-obj-$(CONFIG_MTRR)     +=      ../../../x86/kernel/cpu/mtrr/
-obj-$(CONFIG_CPU_FREQ) +=      ../../../x86/kernel/cpu/cpufreq/
-
-obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/i386/kernel/cpu/addon_cpuid_features.c b/arch/i386/kernel/cpu/addon_cpuid_features.c
deleted file mode 100644
index 3e91d3e..0000000
--- a/arch/i386/kernel/cpu/addon_cpuid_features.c
+++ /dev/null
@@ -1,50 +0,0 @@
-
-/*
- *     Routines to identify additional cpu features that are scattered in
- *     cpuid space.
- */
-
-#include <linux/cpu.h>
-
-#include <asm/processor.h>
-
-struct cpuid_bit {
-       u16 feature;
-       u8 reg;
-       u8 bit;
-       u32 level;
-};
-
-enum cpuid_regs {
-       CR_EAX = 0,
-       CR_ECX,
-       CR_EDX,
-       CR_EBX
-};
-
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
-{
-       u32 max_level;
-       u32 regs[4];
-       const struct cpuid_bit *cb;
-
-       static const struct cpuid_bit cpuid_bits[] = {
-               { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
-               { 0, 0, 0, 0 }
-       };
-
-       for (cb = cpuid_bits; cb->feature; cb++) {
-
-               /* Verify that the level is valid */
-               max_level = cpuid_eax(cb->level & 0xffff0000);
-               if (max_level < cb->level ||
-                   max_level > (cb->level | 0xffff))
-                       continue;
-
-               cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
-                       &regs[CR_ECX], &regs[CR_EDX]);
-
-               if (regs[cb->reg] & (1 << cb->bit))
-                       set_bit(cb->feature, c->x86_capability);
-       }
-}
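
For reference, the level-validation idiom above (probe the base leaf of a
CPUID range before trusting any leaf inside it) can be exercised from user
space. A minimal sketch, not from the kernel tree, assuming GCC/clang's
<cpuid.h> on x86; it tests the same leaf 0x6 EAX bit 1 (IDA) that the
cpuid_bits[] table lists:

#include <stdio.h>
#include <cpuid.h>                      /* GCC/clang x86 CPUID helpers */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int level = 0x00000006;        /* thermal/power leaf */

	/* The base leaf of the range reports the highest implemented
	 * leaf; reject levels outside that window, as above. */
	unsigned int max_level = __get_cpuid_max(level & 0xffff0000, 0);
	if (max_level < level || max_level > (level | 0xffff)) {
		printf("leaf 0x%x not implemented\n", level);
		return 0;
	}
	__cpuid(level, eax, ebx, ecx, edx);
	printf("IDA (leaf 0x6, EAX bit 1): %u\n", (eax >> 1) & 1);
	return 0;
}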
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
deleted file mode 100644
index dcf6bbb..0000000
--- a/arch/i386/kernel/cpu/amd.c
+++ /dev/null
@@ -1,337 +0,0 @@
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/mm.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/apic.h>
-
-#include "cpu.h"
-
-/*
- *     B step AMD K6 before B 9730xxxx have hardware bugs that can cause
- *     misexecution of code under Linux. Owners of such processors should
- *     contact AMD for precise details and a CPU swap.
- *
- *     See     http://www.multimania.com/poulot/k6bug.html
- *             http://www.amd.com/K6/k6docs/revgd.html
- *
- *     The following test is erm.. interesting. AMD neglected to up
- *     the chip setting when fixing the bug but they also tweaked some
- *     performance at the same time..
- */
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#define ENABLE_C1E_MASK         0x18000000
-#define CPUID_PROCESSOR_SIGNATURE       1
-#define CPUID_XFAM              0x0ff00000
-#define CPUID_XFAM_K8           0x00000000
-#define CPUID_XFAM_10H          0x00100000
-#define CPUID_XFAM_11H          0x00200000
-#define CPUID_XMOD              0x000f0000
-#define CPUID_XMOD_REV_F        0x00040000
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(void)
-{
-       u32 lo, hi;
-       u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-       switch (eax & CPUID_XFAM) {
-       case CPUID_XFAM_K8:
-               if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-                       break;
-       case CPUID_XFAM_10H:
-       case CPUID_XFAM_11H:
-               rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
-               if (lo & ENABLE_C1E_MASK)
-                       return 1;
-                break;
-        default:
-                /* err on the side of caution */
-               return 1;
-        }
-       return 0;
-}
-#endif
-
-int force_mwait __cpuinitdata;
-
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-{
-       u32 l, h;
-       int mbytes = num_physpages >> (20-PAGE_SHIFT);
-       int r;
-
-#ifdef CONFIG_SMP
-       unsigned long long value;
-
-       /* Disable TLB flush filter by setting HWCR.FFDIS on K8
-        * bit 6 of msr C001_0015
-        *
-        * Errata 63 for SH-B3 steppings
-        * Errata 122 for all steppings (F+ have it disabled by default)
-        */
-       if (c->x86 == 15) {
-               rdmsrl(MSR_K7_HWCR, value);
-               value |= 1 << 6;
-               wrmsrl(MSR_K7_HWCR, value);
-       }
-#endif
-
-       /*
-        *      FIXME: We should handle the K5 here. Set up the write
-        *      range and also turn on MSR 83 bits 4 and 31 (write alloc,
-        *      no bus pipeline)
-        */
-
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_bit(0*32+31, c->x86_capability);
-       
-       r = get_model_name(c);
-
-       switch(c->x86)
-       {
-               case 4:
-               /*
-                * General Systems BIOSen alias the cpu frequency registers
-                * of the Elan at 0x000df000. Unfortunately, one of the Linux
-                * drivers subsequently pokes it, and changes the CPU speed.
-                * Workaround : Remove the unneeded alias.
-                */
-#define CBAR           (0xfffc) /* Configuration Base Address  (32-bit) */
-#define CBAR_ENB       (0x80000000)
-#define CBAR_KEY       (0X000000CB)
-                       if (c->x86_model==9 || c->x86_model == 10) {
-                               if (inl (CBAR) & CBAR_ENB)
-                                       outl (0 | CBAR_KEY, CBAR);
-                       }
-                       break;
-               case 5:
-                       if( c->x86_model < 6 )
-                       {
-                               /* Based on AMD doc 20734R - June 2000 */
-                               if ( c->x86_model == 0 ) {
-                                       clear_bit(X86_FEATURE_APIC, c->x86_capability);
-                                       set_bit(X86_FEATURE_PGE, c->x86_capability);
-                               }
-                               break;
-                       }
-                       
-                       if ( c->x86_model == 6 && c->x86_mask == 1 ) {
-                               const int K6_BUG_LOOP = 1000000;
-                               int n;
-                               void (*f_vide)(void);
-                               unsigned long d, d2;
-                               
-                               printk(KERN_INFO "AMD K6 stepping B detected - ");
-                               
-                               /*
-                                * It looks like AMD fixed the 2.6.2 bug and improved indirect 
-                                * calls at the same time.
-                                */
-
-                               n = K6_BUG_LOOP;
-                               f_vide = vide;
-                               rdtscl(d);
-                               while (n--) 
-                                       f_vide();
-                               rdtscl(d2);
-                               d = d2-d;
-
-                               if (d > 20*K6_BUG_LOOP) 
-                                       printk("system stability may be impaired when more than 32 MB are used.\n");
-                               else 
-                                       printk("probably OK (after B9730xxxx).\n");
-                               printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
-                       }
-
-                       /* K6 with old style WHCR */
-                       if (c->x86_model < 8 ||
-                          (c->x86_model== 8 && c->x86_mask < 8)) {
-                               /* We can only write allocate on the low 508Mb */
-                               if(mbytes>508)
-                                       mbytes=508;
-
-                               rdmsr(MSR_K6_WHCR, l, h);
-                               if ((l&0x0000FFFF)==0) {
-                                       unsigned long flags;
-                                       l=(1<<0)|((mbytes/4)<<1);
-                                       local_irq_save(flags);
-                                       wbinvd();
-                                       wrmsr(MSR_K6_WHCR, l, h);
-                                       local_irq_restore(flags);
-                                       printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
-                                               mbytes);
-                               }
-                               break;
-                       }
-
-                       if ((c->x86_model == 8 && c->x86_mask >7) ||
-                            c->x86_model == 9 || c->x86_model == 13) {
-                               /* The more serious chips .. */
-
-                               if(mbytes>4092)
-                                       mbytes=4092;
-
-                               rdmsr(MSR_K6_WHCR, l, h);
-                               if ((l&0xFFFF0000)==0) {
-                                       unsigned long flags;
-                                       l=((mbytes>>2)<<22)|(1<<16);
-                                       local_irq_save(flags);
-                                       wbinvd();
-                                       wrmsr(MSR_K6_WHCR, l, h);
-                                       local_irq_restore(flags);
-                                       printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
-                                               mbytes);
-                               }
-
-                               /*  Set MTRR capability flag if appropriate */
-                               if (c->x86_model == 13 || c->x86_model == 9 ||
-                                  (c->x86_model == 8 && c->x86_mask >= 8))
-                                       set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
-                               break;
-                       }
-
-                       if (c->x86_model == 10) {
-                               /* AMD Geode LX is model 10 */
-                               /* placeholder for any needed mods */
-                               break;
-                       }
-                       break;
-               case 6: /* An Athlon/Duron */
-                       /* Bit 15 of Athlon specific MSR 15, needs to be 0
-                        * to enable SSE on Palomino/Morgan/Barton CPU's.
-                        * If the BIOS didn't enable it already, enable it here.
-                        */
-                       if (c->x86_model >= 6 && c->x86_model <= 10) {
-                               if (!cpu_has(c, X86_FEATURE_XMM)) {
-                                       printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-                                       rdmsr(MSR_K7_HWCR, l, h);
-                                       l &= ~0x00008000;
-                                       wrmsr(MSR_K7_HWCR, l, h);
-                                       set_bit(X86_FEATURE_XMM, c->x86_capability);
-                               }
-                       }
-
-                       /* It's been determined by AMD that Athlons since model 8 stepping 1
-                        * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-                        * As per AMD technical note 27212 0.2
-                        */
-                       if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
-                               rdmsr(MSR_K7_CLK_CTL, l, h);
-                               if ((l & 0xfff00000) != 0x20000000) {
-                                       printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-                                               ((l & 0x000fffff)|0x20000000));
-                                       wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-                               }
-                       }
-                       break;
-       }
-
-       switch (c->x86) {
-       case 15:
-       /* Use K8 tuning for Fam10h and Fam11h */
-       case 0x10:
-       case 0x11:
-               set_bit(X86_FEATURE_K8, c->x86_capability);
-               break;
-       case 6:
-               set_bit(X86_FEATURE_K7, c->x86_capability); 
-               break;
-       }
-       if (c->x86 >= 6)
-               set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
-
-       display_cacheinfo(c);
-
-       if (cpuid_eax(0x80000000) >= 0x80000008) {
-               c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-       }
-
-       if (cpuid_eax(0x80000000) >= 0x80000007) {
-               c->x86_power = cpuid_edx(0x80000007);
-               if (c->x86_power & (1<<8))
-                       set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-       }
-
-#ifdef CONFIG_X86_HT
-       /*
-        * On an AMD multi-core setup the lower bits of the APIC id
-        * distinguish the cores.
-        */
-       if (c->x86_max_cores > 1) {
-               int cpu = smp_processor_id();
-               unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
-
-               if (bits == 0) {
-                       while ((1 << bits) < c->x86_max_cores)
-                               bits++;
-               }
-               c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
-               c->phys_proc_id >>= bits;
-               printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
-                      cpu, c->x86_max_cores, c->cpu_core_id);
-       }
-#endif
-
-       if (cpuid_eax(0x80000000) >= 0x80000006) {
-               if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
-                       num_cache_leaves = 4;
-               else
-                       num_cache_leaves = 3;
-       }
-
-#ifdef CONFIG_X86_LOCAL_APIC
-       if (amd_apic_timer_broken())
-               local_apic_timer_disabled = 1;
-#endif
-
-       if (c->x86 == 0x10 && !force_mwait)
-               clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
-
-       /* K6s report MCEs but don't actually have all the MSRs */
-       if (c->x86 < 6)
-               clear_bit(X86_FEATURE_MCE, c->x86_capability);
-}
-
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-       /* AMD errata T13 (order #21922) */
-       if ((c->x86 == 6)) {
-               if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
-                       size = 64;
-               if (c->x86_model == 4 &&
-                   (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
-                       size = 256;
-       }
-       return size;
-}
-
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
-       .c_vendor       = "AMD",
-       .c_ident        = { "AuthenticAMD" },
-       .c_models = {
-               { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
-                 {
-                         [3] = "486 DX/2",
-                         [7] = "486 DX/2-WB",
-                         [8] = "486 DX/4", 
-                         [9] = "486 DX/4-WB", 
-                         [14] = "Am5x86-WT",
-                         [15] = "Am5x86-WB" 
-                 }
-               },
-       },
-       .c_init         = init_amd,
-       .c_size_cache   = amd_size_cache,
-};
-
-int __init amd_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
-       return 0;
-}
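
The CONFIG_X86_HT block in init_amd() above splits the initial APIC ID into
package and core numbers. A standalone sketch of that arithmetic, not from
the kernel tree, assuming GCC/clang's <cpuid.h> and a CPU exposing leaf
0x80000008 (ECX[7:0] = core count - 1, ECX[15:12] = ApicIdCoreIdSize):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_max(0x80000000, 0) < 0x80000008)
		return 1;               /* leaf not implemented */
	__cpuid(0x80000008, eax, ebx, ecx, edx);

	unsigned int cores = (ecx & 0xff) + 1;
	unsigned int bits  = (ecx >> 12) & 0xf;
	if (bits == 0)                  /* older parts: derive from count */
		while ((1u << bits) < cores)
			bits++;

	__cpuid(1, eax, ebx, ecx, edx);
	unsigned int apicid = ebx >> 24;        /* initial APIC ID */
	printf("package %u, core %u (%u cores, %u core-id bits)\n",
	       apicid >> bits, apicid & ((1u << bits) - 1), cores, bits);
	return 0;
}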
diff --git a/arch/i386/kernel/cpu/bugs.c b/arch/i386/kernel/cpu/bugs.c
deleted file mode 100644
index 59266f0..0000000
--- a/arch/i386/kernel/cpu/bugs.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- *  arch/i386/cpu/bugs.c
- *
- *  Copyright (C) 1994  Linus Torvalds
- *
- *  Cyrix stuff, June 1998 by:
- *     - Rafael R. Reilova (moved everything from head.S),
- *        <rreilova@ececs.uc.edu>
- *     - Channing Corn (tests & fixes),
- *     - Andrew D. Balsa (code cleanup).
- */
-#include <linux/init.h>
-#include <linux/utsname.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/paravirt.h>
-#include <asm/alternative.h>
-
-static int __init no_halt(char *s)
-{
-       boot_cpu_data.hlt_works_ok = 0;
-       return 1;
-}
-
-__setup("no-hlt", no_halt);
-
-static int __init mca_pentium(char *s)
-{
-       mca_pentium_flag = 1;
-       return 1;
-}
-
-__setup("mca-pentium", mca_pentium);
-
-static int __init no_387(char *s)
-{
-       boot_cpu_data.hard_math = 0;
-       write_cr0(0xE | read_cr0());
-       return 1;
-}
-
-__setup("no387", no_387);
-
-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
-       if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-               printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
-               printk(KERN_EMERG "Giving up.\n");
-               for (;;) ;
-#endif
-               return;
-       }
-
-/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
-       /* Test for the divl bug.. */
-       __asm__("fninit\n\t"
-               "fldl %1\n\t"
-               "fdivl %2\n\t"
-               "fmull %2\n\t"
-               "fldl %1\n\t"
-               "fsubp %%st,%%st(1)\n\t"
-               "fistpl %0\n\t"
-               "fwait\n\t"
-               "fninit"
-               : "=m" (*&boot_cpu_data.fdiv_bug)
-               : "m" (*&x), "m" (*&y));
-       if (boot_cpu_data.fdiv_bug)
-               printk("Hmm, FPU with FDIV bug.\n");
-}
-
-static void __init check_hlt(void)
-{
-       if (paravirt_enabled())
-               return;
-
-       printk(KERN_INFO "Checking 'hlt' instruction... ");
-       if (!boot_cpu_data.hlt_works_ok) {
-               printk("disabled\n");
-               return;
-       }
-       halt();
-       halt();
-       halt();
-       halt();
-       printk("OK.\n");
-}
-
-/*
- *     Most 386 processors have a bug where a POPAD can lock the
- *     machine even from user space.
- */
-
-static void __init check_popad(void)
-{
-#ifndef CONFIG_X86_POPAD_OK
-       int res, inp = (int) &res;
-
-       printk(KERN_INFO "Checking for popad bug... ");
-       __asm__ __volatile__(
-         "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
-         : "=&a" (res)
-         : "d" (inp)
-         : "ecx", "edi" );
-       /* If this fails, it means that any user program may lock the CPU hard. Too bad. */
-       if (res != 12345678) printk( "Buggy.\n" );
-                       else printk( "OK.\n" );
-#endif
-}
-
-/*
- * Check whether we are able to run this kernel safely on SMP.
- *
- * - In order to run on a i386, we need to be compiled for i386
- *   (due to the lack of "invlpg" and working WP on a i386)
- * - In order to run on anything without a TSC, we need to be
- *   compiled for a i486.
- * - In order to support the local APIC on a buggy Pentium machine,
- *   we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
- *   which happens implicitly if compiled for a Pentium or lower
- *   (unless an advanced selection of CPU features is used), since
- *   such a config otherwise implies a properly working local APIC
- *   without the need to do extra reads from the APIC.
-*/
-
-static void __init check_config(void)
-{
-/*
- * We'd better not be a i386 if we're configured to use some
- * i486+ only features! (WP works in supervisor mode and the
- * new "invlpg" and "bswap" instructions)
- */
-#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
-       if (boot_cpu_data.x86 == 3)
-               panic("Kernel requires i486+ for 'invlpg' and other features");
-#endif
-
-/*
- * If we configured ourselves for a TSC, we'd better have one!
- */
-#ifdef CONFIG_X86_TSC
-       if (!cpu_has_tsc && !tsc_disable)
-               panic("Kernel compiled for Pentium+, requires TSC feature!");
-#endif
-
-/*
- * If we were told we had a good local APIC, check for buggy Pentia,
- * i.e. all B steppings and the C2 stepping of P54C when using their
- * integrated APIC (see 11AP erratum in "Pentium Processor
- * Specification Update").
- */
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
-           && cpu_has_apic
-           && boot_cpu_data.x86 == 5
-           && boot_cpu_data.x86_model == 2
-           && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
-               panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
-#endif
-}
-
-
-void __init check_bugs(void)
-{
-       identify_boot_cpu();
-#ifndef CONFIG_SMP
-       printk("CPU: ");
-       print_cpu_info(&boot_cpu_data);
-#endif
-       check_config();
-       check_fpu();
-       check_hlt();
-       check_popad();
-       init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-       alternative_instructions();
-}
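
For reference, the inline x87 sequence in check_fpu() above computes
x - (x/y)*y and stores the rounded integer result in fdiv_bug: with the
well-known operand pair 4195835/3145727 a correct FPU rounds to 0, while
the flawed Pentium FDIV unit leaves a residue of 256. The same test in
portable C, a sketch that any modern FPU will of course pass:

#include <stdio.h>

int main(void)
{
	/* volatile keeps the compiler from folding the test away */
	volatile double x = 4195835.0, y = 3145727.0;
	int fdiv_bug = (int)(x - (x / y) * y);  /* 0 on a correct FPU */

	if (fdiv_bug)
		printf("Hmm, FPU with FDIV bug (residue %d).\n", fdiv_bug);
	else
		printf("FDIV OK.\n");
	return 0;
}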
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
deleted file mode 100644
index 473eac8..0000000
--- a/arch/i386/kernel/cpu/centaur.c
+++ /dev/null
@@ -1,471 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/e820.h>
-#include <asm/mtrr.h>
-#include "cpu.h"
-
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 __cpuinit power2(u32 x)
-{
-       u32 s=1;
-       while(s<=x)
-               s<<=1;
-       return s>>=1;
-}
-
-
-/*
- *     Set up an actual MCR
- */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
-       u32 lo, hi;
-       
-       hi = base & ~0xFFF;
-       lo = ~(size-1);         /* Size is a power of 2 so this makes a mask */
-       lo &= ~0xFFF;           /* Remove the ctrl value bits */
-       lo |= key;              /* Attribute we wish to set */
-       wrmsr(reg+MSR_IDT_MCR0, lo, hi);
-       mtrr_centaur_report_mcr(reg, lo, hi);   /* Tell the mtrr driver */
-}
-
-/*
- *     Figure what we can cover with MCR's
- *
- *     Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-
-static u32 __cpuinit ramtop(void)              /* 16388 */
-{
-       int i;
-       u32 top = 0;
-       u32 clip = 0xFFFFFFFFUL;
-       
-       for (i = 0; i < e820.nr_map; i++) {
-               unsigned long start, end;
-
-               if (e820.map[i].addr > 0xFFFFFFFFUL)
-                       continue;
-               /*
-                *      Don't MCR over reserved space. Ignore the ISA hole;
-                *      we frob around that catastrophe already
-                */
-                                       
-               if (e820.map[i].type == E820_RESERVED)
-               {
-                       if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
-                               clip = e820.map[i].addr;
-                       continue;
-               }
-               start = e820.map[i].addr;
-               end = e820.map[i].addr + e820.map[i].size;
-               if (start >= end)
-                       continue;
-               if (end > top)
-                       top = end;
-       }
-       /* Everything below 'top' should be RAM except for the ISA hole.
-          Because of the limited MCR's we want to map NV/ACPI into our
-          MCR range for gunk in RAM 
-          
-          Clip might cause us to MCR insufficient RAM but that is an
-          acceptable failure mode and should only bite obscure boxes with
-          a VESA hole at 15Mb
-          
-          The second case Clip sometimes kicks in is when the EBDA is marked
-          as reserved. Again we fail safe with reasonable results
-       */
-       
-       if(top>clip)
-               top=clip;
-               
-       return top;
-}
-
-/*
- *     Compute a set of MCR's to give maximum coverage
- */
-
-static int __cpuinit centaur_mcr_compute(int nr, int key)
-{
-       u32 mem = ramtop();
-       u32 root = power2(mem);
-       u32 base = root;
-       u32 top = root;
-       u32 floor = 0;
-       int ct = 0;
-       
-       while(ct<nr)
-       {
-               u32 fspace = 0;
-
-               /*
-                *      Find the largest block we will fill going upwards
-                */
-
-               u32 high = power2(mem-top);     
-
-               /*
-                *      Find the largest block we will fill going downwards
-                */
-
-               u32 low = base/2;
-
-               /*
-                *      Don't fill below 1Mb going downwards as there
-                *      is an ISA hole in the way.
-                */             
-                
-               if(base <= 1024*1024)
-                       low = 0;
-                       
-               /*
-                *      See how much space we could cover by filling below
-                *      the ISA hole
-                */
-                
-               if(floor == 0)
-                       fspace = 512*1024;
-               else if(floor ==512*1024)
-                       fspace = 128*1024;
-
-               /* And forget ROM space */
-               
-               /*
-                *      Now install the largest coverage we get
-                */
-                
-               if(fspace > high && fspace > low)
-               {
-                       centaur_mcr_insert(ct, floor, fspace, key);
-                       floor += fspace;
-               }
-               else if(high > low)
-               {
-                       centaur_mcr_insert(ct, top, high, key);
-                       top += high;
-               }
-               else if(low > 0)
-               {
-                       base -= low;
-                       centaur_mcr_insert(ct, base, low, key);
-               }
-               else break;
-               ct++;
-       }
-       /*
-        *      We loaded ct values. We now need to set the mask. The caller
-        *      must do this bit.
-        */
-        
-       return ct;
-}
-
-static void __cpuinit centaur_create_optimal_mcr(void)
-{
-       int i;
-       /*
-        *      Allocate up to 6 mcrs to mark as much of ram as possible
-        *      as write combining and weak write ordered.
-        *
-        *      To experiment with: Linux never uses stack operations for 
-        *      mmio spaces so we could globally enable stack operation wc
-        *
-        *      Load the registers with type 31 - full write combining, all
-        *      writes weakly ordered.
-        */
-       int used = centaur_mcr_compute(6, 31);
-
-       /*
-        *      Wipe unused MCRs
-        */
-        
-       for(i=used;i<8;i++)
-               wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void __cpuinit winchip2_create_optimal_mcr(void)
-{
-       u32 lo, hi;
-       int i;
-
-       /*
-        *      Allocate up to 6 mcrs to mark as much of ram as possible
-        *      as write combining, weak store ordered.
-        *
-        *      Load the registers with type 25
-        *              8       -       weak write ordering
-        *              16      -       weak read ordering
-        *              1       -       write combining
-        */
-
-       int used = centaur_mcr_compute(6, 25);
-       
-       /*
-        *      Mark the registers we are using.
-        */
-        
-       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-       for(i=0;i<used;i++)
-               lo|=1<<(9+i);
-       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-       
-       /*
-        *      Wipe unused MCRs
-        */
-        
-       for(i=used;i<8;i++)
-               wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- *     Handle the MCR key on the Winchip 2.
- */
-
-static void __cpuinit winchip2_unprotect_mcr(void)
-{
-       u32 lo, hi;
-       u32 key;
-       
-       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-       lo&=~0x1C0;     /* blank bits 8-6 */
-       key = (lo>>17) & 7;
-       lo |= key<<6;   /* replace with unlock key */
-       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void __cpuinit winchip2_protect_mcr(void)
-{
-       u32 lo, hi;
-       
-       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-       lo&=~0x1C0;     /* blank bits 8-6 */
-       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
-#define ACE_PRESENT    (1 << 6)
-#define ACE_ENABLED    (1 << 7)
-#define ACE_FCR                (1 << 28)       /* MSR_VIA_FCR */
-
-#define RNG_PRESENT    (1 << 2)
-#define RNG_ENABLED    (1 << 3)
-#define RNG_ENABLE     (1 << 6)        /* MSR_VIA_RNG */
-
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
-{
-       u32  lo, hi;
-
-       /* Test for Centaur Extended Feature Flags presence */
-       if (cpuid_eax(0xC0000000) >= 0xC0000001) {
-               u32 tmp = cpuid_edx(0xC0000001);
-
-               /* enable ACE unit, if present and disabled */
-               if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
-                       rdmsr (MSR_VIA_FCR, lo, hi);
-                       lo |= ACE_FCR;          /* enable ACE unit */
-                       wrmsr (MSR_VIA_FCR, lo, hi);
-                       printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
-               }
-
-               /* enable RNG unit, if present and disabled */
-               if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
-                       rdmsr (MSR_VIA_RNG, lo, hi);
-                       lo |= RNG_ENABLE;       /* enable RNG unit */
-                       wrmsr (MSR_VIA_RNG, lo, hi);
-                       printk(KERN_INFO "CPU: Enabled h/w RNG\n");
-               }
-
-               /* store Centaur Extended Feature Flags as
-                * word 5 of the CPU capability bit array
-                */
-               c->x86_capability[5] = cpuid_edx(0xC0000001);
-       }
-
-       /* Cyrix III family needs CX8 & PGE explicitly enabled. */
-       if (c->x86_model >=6 && c->x86_model <= 9) {
-               rdmsr (MSR_VIA_FCR, lo, hi);
-               lo |= (1<<1 | 1<<7);
-               wrmsr (MSR_VIA_FCR, lo, hi);
-               set_bit(X86_FEATURE_CX8, c->x86_capability);
-       }
-
-       /* Before Nehemiah, the C3's had 3dNOW! */
-       if (c->x86_model >=6 && c->x86_model <9)
-               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-
-       get_model_name(c);
-       display_cacheinfo(c);
-}
-
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
-{
-       enum {
-               ECX8=1<<1,
-               EIERRINT=1<<2,
-               DPM=1<<3,
-               DMCE=1<<4,
-               DSTPCLK=1<<5,
-               ELINEAR=1<<6,
-               DSMC=1<<7,
-               DTLOCK=1<<8,
-               EDCTLB=1<<8,
-               EMMX=1<<9,
-               DPDC=1<<11,
-               EBRPRED=1<<12,
-               DIC=1<<13,
-               DDC=1<<14,
-               DNA=1<<15,
-               ERETSTK=1<<16,
-               E2MMX=1<<19,
-               EAMD3D=1<<20,
-       };
-
-       char *name;
-       u32  fcr_set=0;
-       u32  fcr_clr=0;
-       u32  lo,hi,newlo;
-       u32  aa,bb,cc,dd;
-
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_bit(0*32+31, c->x86_capability);
-
-       switch (c->x86) {
-
-               case 5:
-                       switch(c->x86_model) {
-                       case 4:
-                               name="C6";
-                               fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
-                               fcr_clr=DPDC;
-                               printk(KERN_NOTICE "Disabling bugged TSC.\n");
-                               clear_bit(X86_FEATURE_TSC, c->x86_capability);
-#ifdef CONFIG_X86_OOSTORE
-                               centaur_create_optimal_mcr();
-                               /* Enable
-                                       write combining on non-stack, non-string
-                                       write combining on string, all types
-                                       weak write ordering 
-                                       
-                                  The C6 original lacks weak read order 
-                                  
-                                  Note 0x120 is write only on Winchip 1 */
-                                  
-                               wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif                         
-                               break;
-                       case 8:
-                               switch(c->x86_mask) {
-                               default:
-                                       name="2";
-                                       break;
-                               case 7 ... 9:
-                                       name="2A";
-                                       break;
-                               case 10 ... 15:
-                                       name="2B";
-                                       break;
-                               }
-                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-                               fcr_clr=DPDC;
-#ifdef CONFIG_X86_OOSTORE
-                               winchip2_unprotect_mcr();
-                               winchip2_create_optimal_mcr();
-                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-                               /* Enable
-                                       write combining on non-stack, non-string
-                                       write combining on string, all types
-                                       weak write ordering 
-                               */
-                               lo|=31;                         
-                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-                               winchip2_protect_mcr();
-#endif
-                               break;
-                       case 9:
-                               name="3";
-                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-                               fcr_clr=DPDC;
-#ifdef CONFIG_X86_OOSTORE
-                               winchip2_unprotect_mcr();
-                               winchip2_create_optimal_mcr();
-                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-                               /* Enable
-                                       write combining on non-stack, non-string
-                                       write combining on string, all types
-                                       weak write ordering 
-                               */
-                               lo|=31;                         
-                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-                               winchip2_protect_mcr();
-#endif
-                               break;
-                       default:
-                               name="??";
-                       }
-
-                       rdmsr(MSR_IDT_FCR1, lo, hi);
-                       newlo=(lo|fcr_set) & (~fcr_clr);
-
-                       if (newlo!=lo) {
-                               printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
-                               wrmsr(MSR_IDT_FCR1, newlo, hi );
-                       } else {
-                               printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
-                       }
-                       /* Emulate MTRRs using Centaur's MCR. */
-                       set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
-                       /* Report CX8 */
-                       set_bit(X86_FEATURE_CX8, c->x86_capability);
-                       /* Set 3DNow! on Winchip 2 and above. */
-                       if (c->x86_model >=8)
-                               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
-                       /* See if we can find out some more. */
-                       if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
-                               /* Yes, we can. */
-                               cpuid(0x80000005,&aa,&bb,&cc,&dd);
-                               /* Add L1 data and code cache sizes. */
-                               c->x86_cache_size = (cc>>24)+(dd>>24);
-                       }
-                       sprintf( c->x86_model_id, "WinChip %s", name );
-                       break;
-
-               case 6:
-                       init_c3(c);
-                       break;
-       }
-}
-
-static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-       /* VIA C3 CPUs (670-68F) need further shifting. */
-       if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
-               size >>= 8;
-
-       /* VIA also screwed up Nehemiah stepping 1, and made
-          it return '65KB' instead of '64KB'
-          - Note, it seems this may only be in engineering samples. */
-       if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-               size -=1;
-
-       return size;
-}
-
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Centaur",
-       .c_ident        = { "CentaurHauls" },
-       .c_init         = init_centaur,
-       .c_size_cache   = centaur_size_cache,
-};
-
-int __init centaur_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
-       return 0;
-}
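
The MCR setup above leans on two small invariants: power2() rounds its
argument down to a power of two, and because every MCR size is a power of
two, ~(size - 1) forms the address mask that centaur_mcr_insert() writes.
A self-contained sketch of that arithmetic, not from the kernel tree, with
illustrative values and no MSR access:

#include <stdio.h>
#include <stdint.h>

static uint32_t power2(uint32_t x)      /* largest power of two <= x */
{
	uint32_t s = 1;
	while (s <= x)
		s <<= 1;
	return s >> 1;
}

int main(void)
{
	uint32_t base = 0x00a00000;             /* 10 MiB */
	uint32_t size = power2(0x00900000);     /* 9 MiB rounds to 8 MiB */
	uint32_t key  = 31;     /* type 31: full write combining */

	uint32_t hi = base & ~0xFFFu;                   /* base address */
	uint32_t lo = (~(size - 1) & ~0xFFFu) | key;    /* mask + attribute */

	printf("MCR pair: hi=%08x lo=%08x (%u MiB at %08x)\n",
	       hi, lo, size >> 20, base);
	return 0;
}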
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
deleted file mode 100644
index d506201..0000000
--- a/arch/i386/kernel/cpu/common.c
+++ /dev/null
@@ -1,733 +0,0 @@
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/bootmem.h>
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/mtrr.h>
-#include <asm/mce.h>
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#endif
-
-#include "cpu.h"
-
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
-       [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
-       [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
-       [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
-       [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
-       /*
-        * Segments used for calling PnP BIOS have byte granularity.
-        * The code segments and data segments have fixed 64k limits,
-        * the transfer segment sizes are set at run time.
-        */
-       [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-       [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
-       [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
-       /*
-        * The APM segments have byte granularity and their bases
-        * are set at run time.  All have 64k limits.
-        */
-       [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
-       /* 16-bit code */
-       [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
-       [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
-
-       [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
-       [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
-} };
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_fxsr __cpuinitdata;
-static int disable_x86_serial_nr __cpuinitdata = 1;
-static int disable_x86_sep __cpuinitdata;
-
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-
-extern int disable_pse;
-
-static void __cpuinit default_init(struct cpuinfo_x86 * c)
-{
-       /* Not much we can do here... */
-       /* Check if at least it has cpuid */
-       if (c->cpuid_level == -1) {
-               /* No cpuid. It must be an ancient CPU */
-               if (c->x86 == 4)
-                       strcpy(c->x86_model_id, "486");
-               else if (c->x86 == 3)
-                       strcpy(c->x86_model_id, "386");
-       }
-}
-
-static struct cpu_dev __cpuinitdata default_cpu = {
-       .c_init = default_init,
-       .c_vendor = "Unknown",
-};
-static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-       get_option (&str, &cachesize_override);
-       return 1;
-}
-__setup("cachesize=", cachesize_setup);
-
-int __cpuinit get_model_name(struct cpuinfo_x86 *c)
-{
-       unsigned int *v;
-       char *p, *q;
-
-       if (cpuid_eax(0x80000000) < 0x80000004)
-               return 0;
-
-       v = (unsigned int *) c->x86_model_id;
-       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-       c->x86_model_id[48] = 0;
-
-       /* Intel chips right-justify this string for some dumb reason;
-          undo that brain damage */
-       p = q = &c->x86_model_id[0];
-       while ( *p == ' ' )
-            p++;
-       if ( p != q ) {
-            while ( *p )
-                 *q++ = *p++;
-            while ( q <= &c->x86_model_id[48] )
-                 *q++ = '\0';  /* Zero-pad the rest */
-       }
-
-       return 1;
-}
-
-
-void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
-{
-       unsigned int n, dummy, ecx, edx, l2size;
-
-       n = cpuid_eax(0x80000000);
-
-       if (n >= 0x80000005) {
-               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-               c->x86_cache_size=(ecx>>24)+(edx>>24);  
-       }
-
-       if (n < 0x80000006)     /* Some chips just have a large L1. */
-               return;
-
-       ecx = cpuid_ecx(0x80000006);
-       l2size = ecx >> 16;
-       
-       /* do processor-specific cache resizing */
-       if (this_cpu->c_size_cache)
-               l2size = this_cpu->c_size_cache(c,l2size);
-
-       /* Allow user to override all this if necessary. */
-       if (cachesize_override != -1)
-               l2size = cachesize_override;
-
-       if ( l2size == 0 )
-               return;         /* Again, no L2 cache is possible */
-
-       c->x86_cache_size = l2size;
-
-       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-              l2size, ecx & 0xFF);
-}
-
-/* Naming convention should be: <Name> [(<Codename>)] */
-/* This table is only used if init_<vendor>() below doesn't set it; */
-/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
-{
-       struct cpu_model_info *info;
-
-       if ( c->x86_model >= 16 )
-               return NULL;    /* Range check */
-
-       if (!this_cpu)
-               return NULL;
-
-       info = this_cpu->c_models;
-
-       while (info && info->family) {
-               if (info->family == c->x86)
-                       return info->model_names[c->x86_model];
-               info++;
-       }
-       return NULL;            /* Not found */
-}
-
-
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-{
-       char *v = c->x86_vendor_id;
-       int i;
-       static int printed;
-
-       for (i = 0; i < X86_VENDOR_NUM; i++) {
-               if (cpu_devs[i]) {
-                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-                           (cpu_devs[i]->c_ident[1] && 
-                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-                               c->x86_vendor = i;
-                               if (!early)
-                                       this_cpu = cpu_devs[i];
-                               return;
-                       }
-               }
-       }
-       if (!printed) {
-               printed++;
-               printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
-               printk(KERN_ERR "CPU: Your system may be unstable.\n");
-       }
-       c->x86_vendor = X86_VENDOR_UNKNOWN;
-       this_cpu = &default_cpu;
-}
-
-
-static int __init x86_fxsr_setup(char * s)
-{
-       /* Tell all the other CPU's to not use it... */
-       disable_x86_fxsr = 1;
-
-       /*
-        * ... and clear the bits early in the boot_cpu_data
-        * so that the bootup process doesn't try to do this
-        * either.
-        */
-       clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
-       clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
-       return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-static int __init x86_sep_setup(char * s)
-{
-       disable_x86_sep = 1;
-       return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-       u32 f1, f2;
-
-       asm("pushfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "movl %0,%1\n\t"
-           "xorl %2,%0\n\t"
-           "pushl %0\n\t"
-           "popfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "popfl\n\t"
-           : "=&r" (f1), "=&r" (f2)
-           : "ir" (flag));
-
-       return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-       return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init cpu_detect(struct cpuinfo_x86 *c)
-{
-       /* Get vendor name */
-       cpuid(0x00000000, &c->cpuid_level,
-             (int *)&c->x86_vendor_id[0],
-             (int *)&c->x86_vendor_id[8],
-             (int *)&c->x86_vendor_id[4]);
-
-       c->x86 = 4;
-       if (c->cpuid_level >= 0x00000001) {
-               u32 junk, tfms, cap0, misc;
-               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-               c->x86 = (tfms >> 8) & 15;
-               c->x86_model = (tfms >> 4) & 15;
-               if (c->x86 == 0xf)
-                       c->x86 += (tfms >> 20) & 0xff;
-               if (c->x86 >= 0x6)
-                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
-               c->x86_mask = tfms & 15;
-               if (cap0 & (1<<19))
-                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-       }
-}
-
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
-static void __init early_cpu_detect(void)
-{
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       c->x86_cache_alignment = 32;
-
-       if (!have_cpuid_p())
-               return;
-
-       cpu_detect(c);
-
-       get_cpu_vendor(c, 1);
-}
-
-static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
-{
-       u32 tfms, xlvl;
-       int ebx;
-
-       if (have_cpuid_p()) {
-               /* Get vendor name */
-               cpuid(0x00000000, &c->cpuid_level,
-                     (int *)&c->x86_vendor_id[0],
-                     (int *)&c->x86_vendor_id[8],
-                     (int *)&c->x86_vendor_id[4]);
-               
-               get_cpu_vendor(c, 0);
-               /* Initialize the standard set of capabilities */
-               /* Note that the vendor-specific code below might override */
-       
-               /* Intel-defined flags: level 0x00000001 */
-               if ( c->cpuid_level >= 0x00000001 ) {
-                       u32 capability, excap;
-                       cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-                       c->x86_capability[0] = capability;
-                       c->x86_capability[4] = excap;
-                       c->x86 = (tfms >> 8) & 15;
-                       c->x86_model = (tfms >> 4) & 15;
-                       if (c->x86 == 0xf)
-                               c->x86 += (tfms >> 20) & 0xff;
-                       if (c->x86 >= 0x6)
-                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
-                       c->x86_mask = tfms & 15;
-#ifdef CONFIG_X86_HT
-                       c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
-#else
-                       c->apicid = (ebx >> 24) & 0xFF;
-#endif
-                       if (c->x86_capability[0] & (1<<19))
-                               c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-               } else {
-                       /* Have CPUID level 0 only - unheard of */
-                       c->x86 = 4;
-               }
-
-               /* AMD-defined flags: level 0x80000001 */
-               xlvl = cpuid_eax(0x80000000);
-               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-                       if ( xlvl >= 0x80000001 ) {
-                               c->x86_capability[1] = cpuid_edx(0x80000001);
-                               c->x86_capability[6] = cpuid_ecx(0x80000001);
-                       }
-                       if ( xlvl >= 0x80000004 )
-                               get_model_name(c); /* Default name */
-               }
-
-               init_scattered_cpuid_features(c);
-       }
-
-       early_intel_workaround(c);
-
-#ifdef CONFIG_X86_HT
-       c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
-}
-
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-               /* Disable processor serial number */
-               unsigned long lo,hi;
-               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-               lo |= 0x200000;
-               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-               printk(KERN_NOTICE "CPU serial number disabled.\n");
-               clear_bit(X86_FEATURE_PN, c->x86_capability);
-
-               /* Disabling the serial number may affect the cpuid level */
-               c->cpuid_level = cpuid_eax(0);
-       }
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
-       disable_x86_serial_nr = 0;
-       return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-
-
-
-/*
- * This does the hard work of actually picking apart the CPU stuff...
- */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
-{
-       int i;
-
-       c->loops_per_jiffy = loops_per_jiffy;
-       c->x86_cache_size = -1;
-       c->x86_vendor = X86_VENDOR_UNKNOWN;
-       c->cpuid_level = -1;    /* CPUID not detected */
-       c->x86_model = c->x86_mask = 0; /* So far unknown... */
-       c->x86_vendor_id[0] = '\0'; /* Unset */
-       c->x86_model_id[0] = '\0';  /* Unset */
-       c->x86_max_cores = 1;
-       c->x86_clflush_size = 32;
-       memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
-       if (!have_cpuid_p()) {
-               /* First of all, decide if this is a 486 or higher */
-               /* It's a 486 if we can modify the AC flag */
-               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-                       c->x86 = 4;
-               else
-                       c->x86 = 3;
-       }
-
-       generic_identify(c);
-
-       printk(KERN_DEBUG "CPU: After generic identify, caps:");
-       for (i = 0; i < NCAPINTS; i++)
-               printk(" %08lx", c->x86_capability[i]);
-       printk("\n");
-
-       if (this_cpu->c_identify) {
-               this_cpu->c_identify(c);
-
-               printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-               for (i = 0; i < NCAPINTS; i++)
-                       printk(" %08lx", c->x86_capability[i]);
-               printk("\n");
-       }
-
-       /*
-        * Vendor-specific initialization.  In this section we
-        * canonicalize the feature flags, meaning if there are
-        * features a certain CPU supports which CPUID doesn't
-        * tell us, CPUID claiming incorrect flags, or other bugs,
-        * we handle them here.
-        *
-        * At the end of this section, c->x86_capability better
-        * indicate the features this CPU genuinely supports!
-        */
-       if (this_cpu->c_init)
-               this_cpu->c_init(c);
-
-       /* Disable the PN if appropriate */
-       squash_the_stupid_serial_number(c);
-
-       /*
-        * The vendor-specific functions might have changed features.  Now
-        * we do "generic changes."
-        */
-
-       /* TSC disabled? */
-       if ( tsc_disable )
-               clear_bit(X86_FEATURE_TSC, c->x86_capability);
-
-       /* FXSR disabled? */
-       if (disable_x86_fxsr) {
-               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-               clear_bit(X86_FEATURE_XMM, c->x86_capability);
-       }
-
-       /* SEP disabled? */
-       if (disable_x86_sep)
-               clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
-       if (disable_pse)
-               clear_bit(X86_FEATURE_PSE, c->x86_capability);
-
-       /* If the model name is still unset, do table lookup. */
-       if ( !c->x86_model_id[0] ) {
-               char *p;
-               p = table_lookup_model(c);
-               if ( p )
-                       strcpy(c->x86_model_id, p);
-               else
-                       /* Last resort... */
-                       sprintf(c->x86_model_id, "%02x/%02x",
-                               c->x86, c->x86_model);
-       }
-
-       /* Now the feature flags better reflect actual CPU features! */
-
-       printk(KERN_DEBUG "CPU: After all inits, caps:");
-       for (i = 0; i < NCAPINTS; i++)
-               printk(" %08lx", c->x86_capability[i]);
-       printk("\n");
-
-       /*
-        * On SMP, boot_cpu_data holds the common feature set between
-        * all CPUs; so make sure that we indicate which features are
-        * common between the CPUs.  The first time this routine gets
-        * executed, c == &boot_cpu_data.
-        */
-       if ( c != &boot_cpu_data ) {
-               /* AND the already accumulated flags with these */
-               for ( i = 0 ; i < NCAPINTS ; i++ )
-                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-       }
-
-       /* Init Machine Check Exception if available. */
-       mcheck_init(c);
-}
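A minimal standalone sketch (illustrative, not part of this commit) of the capability-intersection step described in the comment above: each secondary CPU ANDs its feature words into boot_cpu_data's, so only features present on every CPU survive. NCAPINTS is fixed at 8 here purely for the example.

#include <stdint.h>

#define NCAPINTS 8	/* number of 32-bit capability words; illustrative */

static void intersect_caps(uint32_t boot[NCAPINTS], const uint32_t cpu[NCAPINTS])
{
	int i;

	/* keep a bit only if every CPU seen so far has it set */
	for (i = 0; i < NCAPINTS; i++)
		boot[i] &= cpu[i];
}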
-
-void __init identify_boot_cpu(void)
-{
-       identify_cpu(&boot_cpu_data);
-       sysenter_setup();
-       enable_sep_cpu();
-       mtrr_bp_init();
-}
-
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
-{
-       BUG_ON(c == &boot_cpu_data);
-       identify_cpu(c);
-       enable_sep_cpu();
-       mtrr_ap_init();
-}
-
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-       u32     eax, ebx, ecx, edx;
-       int     index_msb, core_bits;
-
-       cpuid(1, &eax, &ebx, &ecx, &edx);
-
-       if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-               return;
-
-       smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-       if (smp_num_siblings == 1) {
-               printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-       } else if (smp_num_siblings > 1 ) {
-
-               if (smp_num_siblings > NR_CPUS) {
-                       printk(KERN_WARNING "CPU: Unsupported number of "
-                                       "siblings: %d\n", smp_num_siblings);
-                       smp_num_siblings = 1;
-                       return;
-               }
-
-               index_msb = get_count_order(smp_num_siblings);
-               c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-
-               printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-                      c->phys_proc_id);
-
-               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-               index_msb = get_count_order(smp_num_siblings);
-
-               core_bits = get_count_order(c->x86_max_cores);
-
-               c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
-                                              ((1 << core_bits) - 1);
-
-               if (c->x86_max_cores > 1)
-                       printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-                              c->cpu_core_id);
-       }
-}
-#endif
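A hedged user-space sketch (not from this commit) of the APIC-ID arithmetic detect_ht() performs above: the initial APIC ID comes from CPUID(1) EBX[31:24], the sibling count from EBX[23:16], and get_count_order(n) is the smallest b with (1 << b) >= n.

#include <stdio.h>

static int count_order(unsigned int n)	/* smallest b with (1 << b) >= n */
{
	int b = 0;

	while ((1u << b) < n)
		b++;
	return b;
}

int main(void)
{
	unsigned int apicid = 0x05, siblings = 4, cores = 2;	/* example values */
	unsigned int threads_per_core = siblings / cores;
	unsigned int pkg = apicid >> count_order(siblings);
	unsigned int core = (apicid >> count_order(threads_per_core)) &
			    ((1u << count_order(cores)) - 1);

	printf("package %u, core %u\n", pkg, core);
	return 0;
}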
-
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
-{
-       char *vendor = NULL;
-
-       if (c->x86_vendor < X86_VENDOR_NUM)
-               vendor = this_cpu->c_vendor;
-       else if (c->cpuid_level >= 0)
-               vendor = c->x86_vendor_id;
-
-       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-               printk("%s ", vendor);
-
-       if (!c->x86_model_id[0])
-               printk("%d86", c->x86);
-       else
-               printk("%s", c->x86_model_id);
-
-       if (c->x86_mask || c->cpuid_level >= 0) 
-               printk(" stepping %02x\n", c->x86_mask);
-       else
-               printk("\n");
-}
-
-cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-
-/* This is hacky. :)
- * We're emulating future behavior.
- * In the future, the cpu-specific init functions will be called implicitly
- * via the magic of initcalls.
- * They will insert themselves into the cpu_devs structure.
- * Then, when cpu_init() is called, we can just iterate over that array.
- */
-
-extern int intel_cpu_init(void);
-extern int cyrix_init_cpu(void);
-extern int nsc_init_cpu(void);
-extern int amd_init_cpu(void);
-extern int centaur_init_cpu(void);
-extern int transmeta_init_cpu(void);
-extern int nexgen_init_cpu(void);
-extern int umc_init_cpu(void);
-
-void __init early_cpu_init(void)
-{
-       intel_cpu_init();
-       cyrix_init_cpu();
-       nsc_init_cpu();
-       amd_init_cpu();
-       centaur_init_cpu();
-       transmeta_init_cpu();
-       nexgen_init_cpu();
-       umc_init_cpu();
-       early_cpu_detect();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /* pse is not compatible with on-the-fly unmapping,
-        * disable it even if the cpus claim to support it.
-        */
-       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-       disable_pse = 1;
-#endif
-}
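The registration pattern the "hacky" comment above describes, reduced to a standalone sketch with invented names (vendor_dev, register_vendor, dispatch are illustrative, not kernel APIs): each vendor init drops a descriptor into a table, and generic code later matches the CPUID vendor string against it.

#include <stdio.h>
#include <string.h>

struct vendor_dev {
	const char *ident;		/* CPUID vendor string to match */
	void (*init)(void);		/* vendor-specific setup hook */
};

#define MAX_VENDORS 8
static struct vendor_dev *vendor_devs[MAX_VENDORS];

static void demo_init(void) { puts("demo vendor initialized"); }
static struct vendor_dev demo_dev = { "GenuineDemo", demo_init };

static void register_vendor(int slot, struct vendor_dev *dev)
{
	vendor_devs[slot] = dev;	/* what cyrix_init_cpu() etc. do below */
}

static void dispatch(const char *cpuid_ident)
{
	int i;

	for (i = 0; i < MAX_VENDORS; i++)
		if (vendor_devs[i] && !strcmp(vendor_devs[i]->ident, cpuid_ident))
			vendor_devs[i]->init();
}

int main(void)
{
	register_vendor(0, &demo_dev);
	dispatch("GenuineDemo");
	return 0;
}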
-
-/* Make sure %fs is initialized properly in idle threads */
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
-{
-       memset(regs, 0, sizeof(struct pt_regs));
-       regs->xfs = __KERNEL_PERCPU;
-       return regs;
-}
-
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-       struct Xgt_desc_struct gdt_descr;
-
-       gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-       gdt_descr.size = GDT_SIZE - 1;
-       load_gdt(&gdt_descr);
-       asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless; this function acts as a
- * 'CPU state barrier': nothing should get across.
- */
-void __cpuinit cpu_init(void)
-{
-       int cpu = smp_processor_id();
-       struct task_struct *curr = current;
-       struct tss_struct * t = &per_cpu(init_tss, cpu);
-       struct thread_struct *thread = &curr->thread;
-
-       if (cpu_test_and_set(cpu, cpu_initialized)) {
-               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-               for (;;) local_irq_enable();
-       }
-
-       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-
-       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-       if (tsc_disable && cpu_has_tsc) {
-               printk(KERN_NOTICE "Disabling TSC...\n");
-               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-               set_in_cr4(X86_CR4_TSD);
-       }
-
-       load_idt(&idt_descr);
-       switch_to_new_gdt();
-
-       /*
-        * Set up and load the per-CPU TSS and LDT
-        */
-       atomic_inc(&init_mm.mm_count);
-       curr->active_mm = &init_mm;
-       if (curr->mm)
-               BUG();
-       enter_lazy_tlb(&init_mm, curr);
-
-       load_esp0(t, thread);
-       set_tss_desc(cpu,t);
-       load_TR_desc();
-       load_LDT(&init_mm.context);
-
-#ifdef CONFIG_DOUBLEFAULT
-       /* Set up doublefault TSS pointer in the GDT */
-       __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-#endif
-
-       /* Clear %gs. */
-       asm volatile ("mov %0, %%gs" : : "r" (0));
-
-       /* Clear all 6 debug registers: */
-       set_debugreg(0, 0);
-       set_debugreg(0, 1);
-       set_debugreg(0, 2);
-       set_debugreg(0, 3);
-       set_debugreg(0, 6);
-       set_debugreg(0, 7);
-
-       /*
-        * Force FPU initialization:
-        */
-       current_thread_info()->status = 0;
-       clear_used_math();
-       mxcsr_feature_mask_init();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpuinit cpu_uninit(void)
-{
-       int cpu = raw_smp_processor_id();
-       cpu_clear(cpu, cpu_initialized);
-
-       /* lazy TLB state */
-       per_cpu(cpu_tlbstate, cpu).state = 0;
-       per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-#endif
diff --git a/arch/i386/kernel/cpu/cpu.h b/arch/i386/kernel/cpu/cpu.h
deleted file mode 100644 (file)
index 2f6432c..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-
-struct cpu_model_info {
-       int vendor;
-       int family;
-       char *model_names[16];
-};
-
-/* attempt to consolidate cpu attributes */
-struct cpu_dev {
-       char    * c_vendor;
-
-       /* some have two possibilities for cpuid string */
-       char    * c_ident[2];   
-
-       struct          cpu_model_info c_models[4];
-
-       void            (*c_init)(struct cpuinfo_x86 * c);
-       void            (*c_identify)(struct cpuinfo_x86 * c);
-       unsigned int    (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
-};
-
-extern struct cpu_dev *cpu_devs[X86_VENDOR_NUM];
-
-extern int get_model_name(struct cpuinfo_x86 *c);
-extern void display_cacheinfo(struct cpuinfo_x86 *c);
-
-extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
deleted file mode 100644 (file)
index 122d2d7..0000000
+++ /dev/null
@@ -1,463 +0,0 @@
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/processor-cyrix.h>
-#include <asm/timer.h>
-#include <asm/pci-direct.h>
-#include <asm/tsc.h>
-
-#include "cpu.h"
-
-/*
- * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
- */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
-{
-       unsigned char ccr2, ccr3;
-       unsigned long flags;
-       
-       /* we test for DEVID by checking whether CCR3 is writable */
-       local_irq_save(flags);
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, ccr3 ^ 0x80);
-       getCx86(0xc0);   /* dummy to change bus */
-
-       if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
-               ccr2 = getCx86(CX86_CCR2);
-               setCx86(CX86_CCR2, ccr2 ^ 0x04);
-               getCx86(0xc0);  /* dummy */
-
-               if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
-                       *dir0 = 0xfd;
-               else {                          /* Cx486S A step */
-                       setCx86(CX86_CCR2, ccr2);
-                       *dir0 = 0xfe;
-               }
-       }
-       else {
-               setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
-
-               /* read DIR0 and DIR1 CPU registers */
-               *dir0 = getCx86(CX86_DIR0);
-               *dir1 = getCx86(CX86_DIR1);
-       }
-       local_irq_restore(flags);
-}
-
-/*
- * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
- * order to identify the Cyrix CPU model after we're out of setup.c
- *
- * Actually, since bugs.h doesn't even reference this, perhaps someone should
- * fix the documentation?
- */
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
-
-static char Cx86_model[][9] __cpuinitdata = {
-       "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
-       "M II ", "Unknown"
-};
-static char Cx486_name[][5] __cpuinitdata = {
-       "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
-       "SRx2", "DRx2"
-};
-static char Cx486S_name[][4] __cpuinitdata = {
-       "S", "S2", "Se", "S2e"
-};
-static char Cx486D_name[][4] __cpuinitdata = {
-       "DX", "DX2", "?", "?", "?", "DX4"
-};
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static char cyrix_model_mult1[] __cpuinitdata = "12??43";
-static char cyrix_model_mult2[] __cpuinitdata = "12233445";
-
-/*
- * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
- * BIOSes for compatibility with DOS games.  This makes the udelay loop
- * work correctly, and improves performance.
- *
- * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
- */
-
-extern void calibrate_delay(void) __init;
-
-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
-{
-       unsigned long flags;
-       
-       if (Cx86_dir0_msb == 3) {
-               unsigned char ccr3, ccr5;
-
-               local_irq_save(flags);
-               ccr3 = getCx86(CX86_CCR3);
-               setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-               ccr5 = getCx86(CX86_CCR5);
-               if (ccr5 & 2)
-                       setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
-               setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
-               local_irq_restore(flags);
-
-               if (ccr5 & 2) { /* possible wrong calibration done */
-                       printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
-                       calibrate_delay();
-                       c->loops_per_jiffy = loops_per_jiffy;
-               }
-       }
-}
-
-
-static void __cpuinit set_cx86_reorder(void)
-{
-       u8 ccr3;
-
-       printk(KERN_INFO "Enabling memory access reordering on Cyrix/NSC processor.\n");
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-
-       /* Load/Store Serialize to mem access disable (=reorder it)  */
-       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
-       /* set load/store serialize from 1GB to 4GB */
-       ccr3 |= 0xe0;
-       setCx86(CX86_CCR3, ccr3);
-}
-
-static void __cpuinit set_cx86_memwb(void)
-{
-       u32 cr0;
-
-       printk(KERN_INFO "Enabling memory write-back mode on Cyrix/NSC processor.\n");
-
-       /* CCR2 bit 2: unlock NW bit */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
-       /* set 'Not Write-through' */
-       cr0 = 0x20000000;
-       write_cr0(read_cr0() | cr0);
-       /* CCR2 bit 2: lock NW bit and set WT1 */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
-}
-
-static void __cpuinit set_cx86_inc(void)
-{
-       unsigned char ccr3;
-
-       printk(KERN_INFO "Enabling Incrementor on Cyrix/NSC processor.\n");
-
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-       /* PCR1 -- Performance Control */
-       /* Incrementor on, whatever that is */
-       setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
-       /* PCR0 -- Performance Control */
-       /* Incrementor Margin 10 */
-       setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); 
-       setCx86(CX86_CCR3, ccr3);       /* disable MAPEN */
-}
-
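The helpers above all share one idiom: Cyrix configuration registers beyond CCR3 are only reachable while MAPEN (the high nibble of CCR3) is set, so every poke is bracketed by an enable and a restore. A sketch of that bracket with stubbed accessors (the array stands in for the real register file behind ports 0x22/0x23; not part of this commit):

static unsigned char fake_regs[256];	/* stand-in for the Cx86 register file */
static unsigned char getCx86_stub(unsigned char reg) { return fake_regs[reg]; }
static void setCx86_stub(unsigned char reg, unsigned char val) { fake_regs[reg] = val; }

#define CCR3 0xc3	/* illustrative index */

static void poke_config_reg(unsigned char reg, unsigned char val)
{
	unsigned char ccr3 = getCx86_stub(CCR3);

	setCx86_stub(CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	setCx86_stub(reg, val);				/* the actual poke */
	setCx86_stub(CCR3, ccr3);			/* restore: MAPEN off */
}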
-/*
- *     Configure later MediaGX and/or Geode processor.
- */
-
-static void __cpuinit geode_configure(void)
-{
-       unsigned long flags;
-       u8 ccr3;
-       local_irq_save(flags);
-
-       /* Suspend on halt power saving and enable #SUSP pin */
-       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
-
-       ccr3 = getCx86(CX86_CCR3);
-       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN */
-       
-
-       /* FPU fast, DTE cache, Mem bypass */
-       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
-       setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
-       
-       set_cx86_memwb();
-       set_cx86_reorder();     
-       set_cx86_inc();
-       
-       local_irq_restore(flags);
-}
-
-
-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
-{
-       unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
-       char *buf = c->x86_model_id;
-       const char *p = NULL;
-
-       /* Bit 31 in standard CPUID is used for a nonstandard 3DNow ID;
-          3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_bit(0*32+31, c->x86_capability);
-
-       /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
-       if ( test_bit(1*32+24, c->x86_capability) ) {
-               clear_bit(1*32+24, c->x86_capability);
-               set_bit(X86_FEATURE_CXMMX, c->x86_capability);
-       }
-
-       do_cyrix_devid(&dir0, &dir1);
-
-       check_cx686_slop(c);
-
-       Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
-       dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
-
-       /* common case step number/rev -- exceptions handled below */
-       c->x86_model = (dir1 >> 4) + 1;
-       c->x86_mask = dir1 & 0xf;
-
-       /* Now cook; the original recipe is by Channing Corn, from Cyrix.
-        * We do the same thing for each generation: we work out
-        * the model, multiplier and stepping.  Black magic included,
-        * to make the silicon step/rev numbers match the printed ones.
-        */
-        
-       switch (dir0_msn) {
-               unsigned char tmp;
-
-       case 0: /* Cx486SLC/DLC/SRx/DRx */
-               p = Cx486_name[dir0_lsn & 7];
-               break;
-
-       case 1: /* Cx486S/DX/DX2/DX4 */
-               p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
-                       : Cx486S_name[dir0_lsn & 3];
-               break;
-
-       case 2: /* 5x86 */
-               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
-               p = Cx86_cb+2;
-               break;
-
-       case 3: /* 6x86/6x86L */
-               Cx86_cb[1] = ' ';
-               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
-               if (dir1 > 0x21) { /* 686L */
-                       Cx86_cb[0] = 'L';
-                       p = Cx86_cb;
-                       (c->x86_model)++;
-               } else             /* 686 */
-                       p = Cx86_cb+1;
-               /* Emulate MTRRs using Cyrix's ARRs. */
-               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
-               /* 6x86's contain this bug */
-               c->coma_bug = 1;
-               break;
-
-       case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
-#ifdef CONFIG_PCI
-       {
-               u32 vendor, device;
-               /* It isn't really a PCI quirk directly, but the cure is the
-                  same. The MediaGX has deep magic SMM stuff that handles the
-                  SB emulation. It throws away the FIFO on disable_dma(),
-                  which is wrong and ruins the audio.
-
-                  Bug 2: VSA1 has a wrap bug so that using maximum-sized DMA
-                  causes bad things. According to NatSemi, VSA2 has another
-                  bug to do with 'hlt'. I've not seen any boards using VSA2,
-                  and X doesn't seem to support it either, so who cares 8).
-                  VSA1 we work around, however.
-               */
-
-               printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
-               isa_dma_bridge_buggy = 2;
-
-               /* We do this before the PCI layer is running. However, we
-                  are safe here, as we know the bridge must be a Cyrix
-                  companion and must be present. */
-               vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
-               device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);
-
-               /*
-                *  The 5510/5520 companion chips have a funky PIT.
-                */  
-               if (vendor == PCI_VENDOR_ID_CYRIX &&
-                   (device == PCI_DEVICE_ID_CYRIX_5510 ||
-                    device == PCI_DEVICE_ID_CYRIX_5520))
-                       mark_tsc_unstable("cyrix 5510/5520 detected");
-       }
-#endif
-               c->x86_cache_size = 16; /* Yep, 16K integrated cache, that's it */
-
-               /* GXm supports extended cpuid levels a la AMD */
-               if (c->cpuid_level == 2) {
-                       /* Enable cxMMX extensions (GX1 Datasheet 54) */
-                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
-                       
-                       /*
-                        * GXm : 0x30 ... 0x5f GXm  datasheet 51
-                        * GXlv: 0x6x          GXlv datasheet 54
-                        *  ?  : 0x7x
-                        * GX1 : 0x8x          GX1  datasheet 56
-                        */
-                       if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
-                               geode_configure();
-                       get_model_name(c);  /* get CPU marketing name */
-                       return;
-               }
-               else {  /* MediaGX */
-                       Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
-                       p = Cx86_cb+2;
-                       c->x86_model = (dir1 & 0x20) ? 1 : 2;
-               }
-               break;
-
-       case 5: /* 6x86MX/M II */
-               if (dir1 > 7) {
-                       dir0_msn++;  /* M II */
-                       /* Enable MMX extensions (App note 108) */
-                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
-               } else {
-                       c->coma_bug = 1;      /* 6x86MX, it has the bug. */
-               }
-               tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
-               Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
-               p = Cx86_cb+tmp;
-               if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
-                       (c->x86_model)++;
-               /* Emulate MTRRs using Cyrix's ARRs. */
-               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
-               break;
-
-       case 0xf:  /* Cyrix 486 without DEVID registers */
-               switch (dir0_lsn) {
-               case 0xd:  /* either a 486SLC or DLC w/o DEVID */
-                       dir0_msn = 0;
-                       p = Cx486_name[(c->hard_math) ? 1 : 0];
-                       break;
-
-               case 0xe:  /* a 486S A step */
-                       dir0_msn = 0;
-                       p = Cx486S_name[0];
-                       break;
-               }
-               break;
-
-       default:  /* unknown (shouldn't happen, we know everyone ;-) */
-               dir0_msn = 7;
-               break;
-       }
-       strcpy(buf, Cx86_model[dir0_msn & 7]);
-       if (p) strcat(buf, p);
-       return;
-}
-
-/*
- * Handle National Semiconductor branded processors
- */
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
-{
-       /* There may be GX1 processors in the wild that are branded
-        * NSC and not Cyrix.
-        *
-        * This function only handles the GX processor, and kicks
-        * everything else to the Cyrix init function above - that should
-        * cover any processors that might have been branded differently
-        * after NSC acquired Cyrix.
-        *
-        * If this breaks your GX1 horribly, please e-mail
-        * info-linux@ldcmail.amd.com to tell us.
-        */
-
-       /* Handle the GX (formerly known as the GX2) */
-
-       if (c->x86 == 5 && c->x86_model == 5)
-               display_cacheinfo(c);
-       else
-               init_cyrix(c);
-}
-
-/*
- * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
- * by the fact that they preserve the flags across the division of 5/2.
- * PII and PPro exhibit this behavior too, but they have cpuid available.
- */
-/*
- * Perform the Cyrix 5/2 test. A Cyrix won't change
- * the flags, while other 486 chips will.
- */
-static inline int test_cyrix_52div(void)
-{
-       unsigned int test;
-
-       __asm__ __volatile__(
-            "sahf\n\t"         /* clear flags (%eax = 0x0005) */
-            "div %b2\n\t"      /* divide 5 by 2 */
-            "lahf"             /* store flags into %ah */
-            : "=a" (test)
-            : "0" (5), "q" (2)
-            : "cc");
-
-       /* AH is 0x02 on Cyrix after the divide. */
-       return (unsigned char) (test >> 8) == 0x02;
-}
-
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)
-{
-       /* Detect Cyrix with disabled CPUID */
-       if ( c->x86 == 4 && test_cyrix_52div() ) {
-               unsigned char dir0, dir1;
-               
-               strcpy(c->x86_vendor_id, "CyrixInstead");
-               c->x86_vendor = X86_VENDOR_CYRIX;
-               
-               /* Actually enable cpuid on the older cyrix */
-           
-               /* Retrieve CPU revisions */
-               
-               do_cyrix_devid(&dir0, &dir1);
-
-               dir0>>=4;               
-               
-               /* Check it is an affected model */
-               
-               if (dir0 == 5 || dir0 == 3)
-               {
-                       unsigned char ccr3;
-                       unsigned long flags;
-                       printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
-                       local_irq_save(flags);
-                       ccr3 = getCx86(CX86_CCR3);
-                       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
-                       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
-                       setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
-                       local_irq_restore(flags);
-               }
-       }
-}
-
-static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Cyrix",
-       .c_ident        = { "CyrixInstead" },
-       .c_init         = init_cyrix,
-       .c_identify     = cyrix_identify,
-};
-
-int __init cyrix_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
-       return 0;
-}
-
-static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
-       .c_vendor       = "NSC",
-       .c_ident        = { "Geode by NSC" },
-       .c_init         = init_nsc,
-};
-
-int __init nsc_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
-       return 0;
-}
-
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
deleted file mode 100644 (file)
index dc4e081..0000000
+++ /dev/null
@@ -1,333 +0,0 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <linux/smp.h>
-#include <linux/thread_info.h>
-#include <linux/module.h>
-
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/uaccess.h>
-
-#include "cpu.h"
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#endif
-
-extern int trap_init_f00f_bug(void);
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
-void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
-{
-       if (c->x86_vendor != X86_VENDOR_INTEL)
-               return;
-       /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
-       if (c->x86 == 15 && c->x86_cache_alignment == 64)
-               c->x86_cache_alignment = 128;
-}
-
-/*
- *     Early probe support logic for ppro memory erratum #50
- *
- *     This is called before we do cpu ident work
- */
-int __cpuinit ppro_with_ram_bug(void)
-{
-       /* Uses data from early_cpu_detect now */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-           boot_cpu_data.x86 == 6 &&
-           boot_cpu_data.x86_model == 1 &&
-           boot_cpu_data.x86_mask < 8) {
-               printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
-               return 1;
-       }
-       return 0;
-}
-       
-
-/*
- * P4 Xeon errata 037 workaround.
- * Hardware prefetcher may cause stale data to be loaded into the cache.
- */
-static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
-{
-       unsigned long lo, hi;
-
-       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-               rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
-               if ((lo & (1<<9)) == 0) {
-                       printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
-                       printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-                       lo |= (1<<9);   /* Disable hw prefetching */
-                       wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
-               }
-       }
-}
-
-
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       if (c->cpuid_level < 4)
-               return 1;
-
-       /* Intel has a non-standard dependency on %ecx for this CPUID level. */
-       cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
-       if (eax & 0x1f)
-               return ((eax >> 26) + 1);
-       else
-               return 1;
-}
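num_cpu_cores() above leans on cpuid_count(), which unlike plain cpuid() also seeds ECX with a sub-leaf index; leaf 4 returns a different descriptor for each ECX value. A minimal sketch of such a wrapper (mirroring, not quoting, the i386 header of the era):

static inline void cpuid_count_sketch(unsigned int op, unsigned int count,
				      unsigned int *eax, unsigned int *ebx,
				      unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op), "2" (count));	/* "2" ties the sub-leaf to ECX */
}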
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
-{
-       unsigned int l2 = 0;
-       char *p = NULL;
-
-#ifdef CONFIG_X86_F00F_BUG
-       /*
-        * All current models of Pentium and Pentium with MMX technology CPUs
-        * have the F0 0F bug, which lets nonprivileged users lock up the system.
-        * Note that the workaround should only be initialized once...
-        */
-       c->f00f_bug = 0;
-       if (!paravirt_enabled() && c->x86 == 5) {
-               static int f00f_workaround_enabled = 0;
-
-               c->f00f_bug = 1;
-               if ( !f00f_workaround_enabled ) {
-                       trap_init_f00f_bug();
-                       printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
-                       f00f_workaround_enabled = 1;
-               }
-       }
-#endif
-
-       select_idle_routine(c);
-       l2 = init_intel_cacheinfo(c);
-       if (c->cpuid_level > 9 ) {
-               unsigned eax = cpuid_eax(10);
-               /* Check for version and the number of counters */
-               if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-                       set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
-       }
-
-       /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
-       if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
-               clear_bit(X86_FEATURE_SEP, c->x86_capability);
-
-       /* Names for the Pentium II/Celeron processors 
-          detectable only by also checking the cache size.
-          Dixon is NOT a Celeron. */
-       if (c->x86 == 6) {
-               switch (c->x86_model) {
-               case 5:
-                       if (c->x86_mask == 0) {
-                               if (l2 == 0)
-                                       p = "Celeron (Covington)";
-                               else if (l2 == 256)
-                                       p = "Mobile Pentium II (Dixon)";
-                       }
-                       break;
-                       
-               case 6:
-                       if (l2 == 128)
-                               p = "Celeron (Mendocino)";
-                       else if (c->x86_mask == 0 || c->x86_mask == 5)
-                               p = "Celeron-A";
-                       break;
-                       
-               case 8:
-                       if (l2 == 128)
-                               p = "Celeron (Coppermine)";
-                       break;
-               }
-       }
-
-       if ( p )
-               strcpy(c->x86_model_id, p);
-       
-       c->x86_max_cores = num_cpu_cores(c);
-
-       detect_ht(c);
-
-       /* Work around errata */
-       Intel_errata_workarounds(c);
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-       /*
-        * Set up the preferred alignment for movsl bulk memory moves
-        */
-       switch (c->x86) {
-       case 4:         /* 486: untested */
-               break;
-       case 5:         /* Old Pentia: untested */
-               break;
-       case 6:         /* PII/PIII only like movsl with 8-byte alignment */
-               movsl_mask.mask = 7;
-               break;
-       case 15:        /* P4 is OK down to 8-byte alignment */
-               movsl_mask.mask = 7;
-               break;
-       }
-#endif
-
-       if (c->x86 == 15) {
-               set_bit(X86_FEATURE_P4, c->x86_capability);
-               set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
-       }
-       if (c->x86 == 6) 
-               set_bit(X86_FEATURE_P3, c->x86_capability);
-       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-               (c->x86 == 0x6 && c->x86_model >= 0x0e))
-               set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-
-       if (cpu_has_ds) {
-               unsigned int l1;
-               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-               if (!(l1 & (1<<11)))
-                       set_bit(X86_FEATURE_BTS, c->x86_capability);
-               if (!(l1 & (1<<12)))
-                       set_bit(X86_FEATURE_PEBS, c->x86_capability);
-       }
-}
-
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
-{
-       /* Intel PIII Tualatin. This comes in two flavours.
-        * One has 256KB of cache, the other 512KB. We have no way
-        * to determine which, so we use a boot-time override
-        * for the 512KB model, and assume 256KB otherwise.
-        */
-       if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
-               size = 256;
-       return size;
-}
-
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Intel",
-       .c_ident        = { "GenuineIntel" },
-       .c_models = {
-               { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 
-                 { 
-                         [0] = "486 DX-25/33", 
-                         [1] = "486 DX-50", 
-                         [2] = "486 SX", 
-                         [3] = "486 DX/2", 
-                         [4] = "486 SL", 
-                         [5] = "486 SX/2", 
-                         [7] = "486 DX/2-WB", 
-                         [8] = "486 DX/4", 
-                         [9] = "486 DX/4-WB"
-                 }
-               },
-               { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
-                 { 
-                         [0] = "Pentium 60/66 A-step", 
-                         [1] = "Pentium 60/66", 
-                         [2] = "Pentium 75 - 200",
-                         [3] = "OverDrive PODP5V83", 
-                         [4] = "Pentium MMX",
-                         [7] = "Mobile Pentium 75 - 200", 
-                         [8] = "Mobile Pentium MMX"
-                 }
-               },
-               { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
-                 { 
-                         [0] = "Pentium Pro A-step",
-                         [1] = "Pentium Pro", 
-                         [3] = "Pentium II (Klamath)", 
-                         [4] = "Pentium II (Deschutes)", 
-                         [5] = "Pentium II (Deschutes)", 
-                         [6] = "Mobile Pentium II",
-                         [7] = "Pentium III (Katmai)", 
-                         [8] = "Pentium III (Coppermine)", 
-                         [10] = "Pentium III (Cascades)",
-                         [11] = "Pentium III (Tualatin)",
-                 }
-               },
-               { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
-                 {
-                         [0] = "Pentium 4 (Unknown)",
-                         [1] = "Pentium 4 (Willamette)",
-                         [2] = "Pentium 4 (Northwood)",
-                         [4] = "Pentium 4 (Foster)",
-                         [5] = "Pentium 4 (Foster)",
-                 }
-               },
-       },
-       .c_init         = init_intel,
-       .c_size_cache   = intel_size_cache,
-};
-
-__init int intel_cpu_init(void)
-{
-       cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
-       return 0;
-}
-
-#ifndef CONFIG_X86_CMPXCHG
-unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
-{
-       u8 prev;
-       unsigned long flags;
-
-       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
-       local_irq_save(flags);
-       prev = *(u8 *)ptr;
-       if (prev == old)
-               *(u8 *)ptr = new;
-       local_irq_restore(flags);
-       return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u8);
-
-unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
-{
-       u16 prev;
-       unsigned long flags;
-
-       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
-       local_irq_save(flags);
-       prev = *(u16 *)ptr;
-       if (prev == old)
-               *(u16 *)ptr = new;
-       local_irq_restore(flags);
-       return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u16);
-
-unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
-{
-       u32 prev;
-       unsigned long flags;
-
-       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
-       local_irq_save(flags);
-       prev = *(u32 *)ptr;
-       if (prev == old)
-               *(u32 *)ptr = new;
-       local_irq_restore(flags);
-       return prev;
-}
-EXPORT_SYMBOL(cmpxchg_386_u32);
-#endif
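A hedged usage sketch for the fallbacks above (atomic_add_u32_sketch is invented; u32 as in the kernel code): callers typically sit in a read-compute-cmpxchg retry loop. The interrupt-disable trick in the helpers is only sound because 386-class parts are excluded from SMP, as the comments note.

static void atomic_add_u32_sketch(volatile u32 *p, u32 delta)
{
	u32 old;

	do {
		old = *p;	/* snapshot, recompute, retry on a race */
	} while (cmpxchg_386_u32(p, old, old + delta) != old);
}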
-
-// arch_initcall(intel_cpu_init);
-
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
deleted file mode 100644 (file)
index db6c25a..0000000
+++ /dev/null
@@ -1,806 +0,0 @@
-/*
- *      Routines to identify caches on Intel CPUs.
- *
- *      Changes:
- *      Venkatesh Pallipadi    : Adding cache identification through cpuid(4)
- *             Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
- *     Andi Kleen / Andreas Herrmann   : CPUID4 emulation on AMD.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/compiler.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-
-#include <asm/processor.h>
-#include <asm/smp.h>
-
-#define LVL_1_INST     1
-#define LVL_1_DATA     2
-#define LVL_2          3
-#define LVL_3          4
-#define LVL_TRACE      5
-
-struct _cache_table
-{
-       unsigned char descriptor;
-       char cache_type;
-       short size;
-};
-
-/* all the cache descriptor types we care about (no TLB entries) */
-static struct _cache_table cache_table[] __cpuinitdata =
-{
-       { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
-       { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
-       { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
-       { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
-       { 0x22, LVL_3,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x23, LVL_3,      1024 },     /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x25, LVL_3,      2048 },     /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x29, LVL_3,      4096 },     /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
-       { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
-       { 0x39, LVL_2,      128 },      /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x3a, LVL_2,      192 },      /* 6-way set assoc, sectored cache, 64 byte line size */
-       { 0x3b, LVL_2,      128 },      /* 2-way set assoc, sectored cache, 64 byte line size */
-       { 0x3c, LVL_2,      256 },      /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x3d, LVL_2,      384 },      /* 6-way set assoc, sectored cache, 64 byte line size */
-       { 0x3e, LVL_2,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x41, LVL_2,      128 },      /* 4-way set assoc, 32 byte line size */
-       { 0x42, LVL_2,      256 },      /* 4-way set assoc, 32 byte line size */
-       { 0x43, LVL_2,      512 },      /* 4-way set assoc, 32 byte line size */
-       { 0x44, LVL_2,      1024 },     /* 4-way set assoc, 32 byte line size */
-       { 0x45, LVL_2,      2048 },     /* 4-way set assoc, 32 byte line size */
-       { 0x46, LVL_3,      4096 },     /* 4-way set assoc, 64 byte line size */
-       { 0x47, LVL_3,      8192 },     /* 8-way set assoc, 64 byte line size */
-       { 0x49, LVL_3,      4096 },     /* 16-way set assoc, 64 byte line size */
-       { 0x4a, LVL_3,      6144 },     /* 12-way set assoc, 64 byte line size */
-       { 0x4b, LVL_3,      8192 },     /* 16-way set assoc, 64 byte line size */
-       { 0x4c, LVL_3,     12288 },     /* 12-way set assoc, 64 byte line size */
-       { 0x4d, LVL_3,     16384 },     /* 16-way set assoc, 64 byte line size */
-       { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
-       { 0x70, LVL_TRACE,  12 },       /* 8-way set assoc */
-       { 0x71, LVL_TRACE,  16 },       /* 8-way set assoc */
-       { 0x72, LVL_TRACE,  32 },       /* 8-way set assoc */
-       { 0x73, LVL_TRACE,  64 },       /* 8-way set assoc */
-       { 0x78, LVL_2,    1024 },       /* 4-way set assoc, 64 byte line size */
-       { 0x79, LVL_2,     128 },       /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x7a, LVL_2,     256 },       /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x7b, LVL_2,     512 },       /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x7c, LVL_2,    1024 },       /* 8-way set assoc, sectored cache, 64 byte line size */
-       { 0x7d, LVL_2,    2048 },       /* 8-way set assoc, 64 byte line size */
-       { 0x7f, LVL_2,     512 },       /* 2-way set assoc, 64 byte line size */
-       { 0x82, LVL_2,     256 },       /* 8-way set assoc, 32 byte line size */
-       { 0x83, LVL_2,     512 },       /* 8-way set assoc, 32 byte line size */
-       { 0x84, LVL_2,    1024 },       /* 8-way set assoc, 32 byte line size */
-       { 0x85, LVL_2,    2048 },       /* 8-way set assoc, 32 byte line size */
-       { 0x86, LVL_2,     512 },       /* 4-way set assoc, 64 byte line size */
-       { 0x87, LVL_2,    1024 },       /* 8-way set assoc, 64 byte line size */
-       { 0x00, 0, 0}
-};
-
-
-enum _cache_type
-{
-       CACHE_TYPE_NULL = 0,
-       CACHE_TYPE_DATA = 1,
-       CACHE_TYPE_INST = 2,
-       CACHE_TYPE_UNIFIED = 3
-};
-
-union _cpuid4_leaf_eax {
-       struct {
-               enum _cache_type        type:5;
-               unsigned int            level:3;
-               unsigned int            is_self_initializing:1;
-               unsigned int            is_fully_associative:1;
-               unsigned int            reserved:4;
-               unsigned int            num_threads_sharing:12;
-               unsigned int            num_cores_on_die:6;
-       } split;
-       u32 full;
-};
-
-union _cpuid4_leaf_ebx {
-       struct {
-               unsigned int            coherency_line_size:12;
-               unsigned int            physical_line_partition:10;
-               unsigned int            ways_of_associativity:10;
-       } split;
-       u32 full;
-};
-
-union _cpuid4_leaf_ecx {
-       struct {
-               unsigned int            number_of_sets:32;
-       } split;
-       u32 full;
-};
-
-struct _cpuid4_info {
-       union _cpuid4_leaf_eax eax;
-       union _cpuid4_leaf_ebx ebx;
-       union _cpuid4_leaf_ecx ecx;
-       unsigned long size;
-       cpumask_t shared_cpu_map;
-};
-
-unsigned short                 num_cache_leaves;
-
-/* AMD doesn't have CPUID4. Emulate it here to report the same
-   information to the user.  This makes some assumptions about the machine:
-   L2 not shared, no SMT, etc.; all currently true on AMD CPUs.
-
-   In theory the TLBs could be reported as a fake type (they are in "dummy").
-   Maybe later. */
-union l1_cache {
-       struct {
-               unsigned line_size : 8;
-               unsigned lines_per_tag : 8;
-               unsigned assoc : 8;
-               unsigned size_in_kb : 8;
-       };
-       unsigned val;
-};
-
-union l2_cache {
-       struct {
-               unsigned line_size : 8;
-               unsigned lines_per_tag : 4;
-               unsigned assoc : 4;
-               unsigned size_in_kb : 16;
-       };
-       unsigned val;
-};
-
-union l3_cache {
-       struct {
-               unsigned line_size : 8;
-               unsigned lines_per_tag : 4;
-               unsigned assoc : 4;
-               unsigned res : 2;
-               unsigned size_encoded : 14;
-       };
-       unsigned val;
-};
-
-static const unsigned short assocs[] = {
-       [1] = 1, [2] = 2, [4] = 4, [6] = 8,
-       [8] = 16, [0xa] = 32, [0xb] = 48,
-       [0xc] = 64,
-       [0xf] = 0xffff          /* fully associative - see assoc == 0xf check below */
-};
-
-static const unsigned char levels[] = { 1, 1, 2, 3 };
-static const unsigned char types[] = { 1, 2, 3, 3 };
-
-static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
-                      union _cpuid4_leaf_ebx *ebx,
-                      union _cpuid4_leaf_ecx *ecx)
-{
-       unsigned dummy;
-       unsigned line_size, lines_per_tag, assoc, size_in_kb;
-       union l1_cache l1i, l1d;
-       union l2_cache l2;
-       union l3_cache l3;
-       union l1_cache *l1 = &l1d;
-
-       eax->full = 0;
-       ebx->full = 0;
-       ecx->full = 0;
-
-       cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
-       cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
-
-       switch (leaf) {
-       case 1:
-               l1 = &l1i;
-               /* fall through: L1i and L1d use the same 0x80000005 layout */
-       case 0:
-               if (!l1->val)
-                       return;
-               assoc = l1->assoc;
-               line_size = l1->line_size;
-               lines_per_tag = l1->lines_per_tag;
-               size_in_kb = l1->size_in_kb;
-               break;
-       case 2:
-               if (!l2.val)
-                       return;
-               assoc = l2.assoc;
-               line_size = l2.line_size;
-               lines_per_tag = l2.lines_per_tag;
-               /* cpu_data has errata corrections for K7 applied */
-               size_in_kb = current_cpu_data.x86_cache_size;
-               break;
-       case 3:
-               if (!l3.val)
-                       return;
-               assoc = l3.assoc;
-               line_size = l3.line_size;
-               lines_per_tag = l3.lines_per_tag;
-               size_in_kb = l3.size_encoded * 512;
-               break;
-       default:
-               return;
-       }
-
-       eax->split.is_self_initializing = 1;
-       eax->split.type = types[leaf];
-       eax->split.level = levels[leaf];
-       if (leaf == 3)
-               eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
-       else
-               eax->split.num_threads_sharing = 0;
-       eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
-
-
-       if (assoc == 0xf)
-               eax->split.is_fully_associative = 1;
-       ebx->split.coherency_line_size = line_size - 1;
-       ebx->split.ways_of_associativity = assocs[assoc] - 1;
-       ebx->split.physical_line_partition = lines_per_tag - 1;
-       ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
-               (ebx->split.ways_of_associativity + 1) - 1;
-}
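A worked instance (values illustrative, not from any datasheet) of the geometry at the end of amd_cpuid4() above: a 512 KB, 16-way cache with 64-byte lines has 512 * 1024 / 64 / 16 = 512 sets, and the leaf encodes sets, ways, and line size each minus one.

#include <assert.h>

int main(void)
{
	unsigned int size_in_kb = 512, line_size = 64, ways = 16;
	unsigned int sets = (size_in_kb * 1024) / line_size / ways;

	assert(sets == 512);	/* ECX's number_of_sets field would hold 511 */
	return 0;
}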
-
-static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-       union _cpuid4_leaf_eax  eax;
-       union _cpuid4_leaf_ebx  ebx;
-       union _cpuid4_leaf_ecx  ecx;
-       unsigned                edx;
-
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-               amd_cpuid4(index, &eax, &ebx, &ecx);
-       else
-               cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full,  &edx);
-       if (eax.split.type == CACHE_TYPE_NULL)
-               return -EIO; /* better error ? */
-
-       this_leaf->eax = eax;
-       this_leaf->ebx = ebx;
-       this_leaf->ecx = ecx;
-       this_leaf->size = (ecx.split.number_of_sets + 1) *
-               (ebx.split.coherency_line_size + 1) *
-               (ebx.split.physical_line_partition + 1) *
-               (ebx.split.ways_of_associativity + 1);
-       return 0;
-}
-
-static int __cpuinit find_num_cache_leaves(void)
-{
-       unsigned int            eax, ebx, ecx, edx;
-       union _cpuid4_leaf_eax  cache_eax;
-       int                     i = -1;
-
-       do {
-               ++i;
-               /* Do cpuid(4) loop to find out num_cache_leaves */
-               cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
-               cache_eax.full = eax;
-       } while (cache_eax.split.type != CACHE_TYPE_NULL);
-       return i;
-}
-
-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
-{
-       unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
-       unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
-       unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
-       unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_X86_HT
-       unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
-#endif
-
-       if (c->cpuid_level > 3) {
-               static int is_initialized;
-
-               if (is_initialized == 0) {
-                       /* Init num_cache_leaves from boot CPU */
-                       num_cache_leaves = find_num_cache_leaves();
-                       is_initialized++;
-               }
-
-               /*
-                * Whenever possible use cpuid(4), the deterministic cache
-                * parameters leaf, to find the cache details
-                */
-               for (i = 0; i < num_cache_leaves; i++) {
-                       struct _cpuid4_info this_leaf;
-
-                       int retval;
-
-                       retval = cpuid4_cache_lookup(i, &this_leaf);
-                       if (retval >= 0) {
-                               switch(this_leaf.eax.split.level) {
-                                   case 1:
-                                       if (this_leaf.eax.split.type ==
-                                                       CACHE_TYPE_DATA)
-                                               new_l1d = this_leaf.size/1024;
-                                       else if (this_leaf.eax.split.type ==
-                                                       CACHE_TYPE_INST)
-                                               new_l1i = this_leaf.size/1024;
-                                       break;
-                                   case 2:
-                                       new_l2 = this_leaf.size/1024;
-                                       num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-                                       index_msb = get_count_order(num_threads_sharing);
-                                       l2_id = c->apicid >> index_msb;
-                                       break;
-                                   case 3:
-                                       new_l3 = this_leaf.size/1024;
-                                       num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-                                       index_msb = get_count_order(num_threads_sharing);
-                                       l3_id = c->apicid >> index_msb;
-                                       break;
-                                   default:
-                                       break;
-                               }
-                       }
-               }
-       }
-       /*
-        * Don't use cpuid2 if cpuid4 is supported. For P4, we still use
-        * cpuid2 for the trace cache.
-        */
-       if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
-               /* supports eax=2  call */
-               int i, j, n;
-               int regs[4];
-               unsigned char *dp = (unsigned char *)regs;
-               int only_trace = 0;
-
-               if (num_cache_leaves != 0 && c->x86 == 15)
-                       only_trace = 1;
-
-               /* Number of times to iterate */
-               n = cpuid_eax(2) & 0xFF;
-
-               for ( i = 0 ; i < n ; i++ ) {
-                       cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
-                       /* If bit 31 is set, this is an unknown format */
-                       for ( j = 0 ; j < 3 ; j++ ) {
-                               if ( regs[j] < 0 ) regs[j] = 0;
-                       }
-
-                       /* Byte 0 is level count, not a descriptor */
-                       for ( j = 1 ; j < 16 ; j++ ) {
-                               unsigned char des = dp[j];
-                               unsigned char k = 0;
-
-                               /* look up this descriptor in the table */
-                               while (cache_table[k].descriptor != 0)
-                               {
-                                       if (cache_table[k].descriptor == des) {
-                                               if (only_trace && cache_table[k].cache_type != LVL_TRACE)
-                                                       break;
-                                               switch (cache_table[k].cache_type) {
-                                               case LVL_1_INST:
-                                                       l1i += cache_table[k].size;
-                                                       break;
-                                               case LVL_1_DATA:
-                                                       l1d += cache_table[k].size;
-                                                       break;
-                                               case LVL_2:
-                                                       l2 += cache_table[k].size;
-                                                       break;
-                                               case LVL_3:
-                                                       l3 += cache_table[k].size;
-                                                       break;
-                                               case LVL_TRACE:
-                                                       trace += cache_table[k].size;
-                                                       break;
-                                               }
-
-                                               break;
-                                       }
-
-                                       k++;
-                               }
-                       }
-               }
-       }
-
-       if (new_l1d)
-               l1d = new_l1d;
-
-       if (new_l1i)
-               l1i = new_l1i;
-
-       if (new_l2) {
-               l2 = new_l2;
-#ifdef CONFIG_X86_HT
-               cpu_llc_id[cpu] = l2_id;
-#endif
-       }
-
-       if (new_l3) {
-               l3 = new_l3;
-#ifdef CONFIG_X86_HT
-               cpu_llc_id[cpu] = l3_id;
-#endif
-       }
-
-       if (trace)
-               printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-       else if ( l1i )
-               printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
-       if (l1d)
-               printk(", L1 D cache: %dK\n", l1d);
-       else
-               printk("\n");
-
-       if (l2)
-               printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-
-       if (l3)
-               printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
-       c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
-
-       return l2;
-}
-
-/* pointer to _cpuid4_info array (for each cache leaf) */
-static struct _cpuid4_info *cpuid4_info[NR_CPUS];
-#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
-
-#ifdef CONFIG_SMP
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
-{
-       struct _cpuid4_info     *this_leaf, *sibling_leaf;
-       unsigned long num_threads_sharing;
-       int index_msb, i;
-       struct cpuinfo_x86 *c = cpu_data;
-
-       this_leaf = CPUID4_INFO_IDX(cpu, index);
-       num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
-
-       if (num_threads_sharing == 1)
-               cpu_set(cpu, this_leaf->shared_cpu_map);
-       else {
-               index_msb = get_count_order(num_threads_sharing);
-
-               for_each_online_cpu(i) {
-                       if (c[i].apicid >> index_msb ==
-                           c[cpu].apicid >> index_msb) {
-                               cpu_set(i, this_leaf->shared_cpu_map);
-                               if (i != cpu && cpuid4_info[i])  {
-                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
-                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
-                               }
-                       }
-               }
-       }
-}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
-{
-       struct _cpuid4_info     *this_leaf, *sibling_leaf;
-       int sibling;
-
-       this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
-               sibling_leaf = CPUID4_INFO_IDX(sibling, index); 
-               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
-       }
-}
-#else
-static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
-static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
-#endif
-
-static void free_cache_attributes(unsigned int cpu)
-{
-       kfree(cpuid4_info[cpu]);
-       cpuid4_info[cpu] = NULL;
-}
-
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
-{
-       struct _cpuid4_info     *this_leaf;
-       unsigned long           j;
-       int                     retval;
-       cpumask_t               oldmask;
-
-       if (num_cache_leaves == 0)
-               return -ENOENT;
-
-       cpuid4_info[cpu] = kzalloc(
-           sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-       if (cpuid4_info[cpu] == NULL)
-               return -ENOMEM;
-
-       oldmask = current->cpus_allowed;
-       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
-       if (retval)
-               goto out;
-
-       /* Do cpuid and store the results */
-       retval = 0;
-       for (j = 0; j < num_cache_leaves; j++) {
-               this_leaf = CPUID4_INFO_IDX(cpu, j);
-               retval = cpuid4_cache_lookup(j, this_leaf);
-               if (unlikely(retval < 0))
-                       break;
-               cache_shared_cpu_map_setup(cpu, j);
-       }
-       set_cpus_allowed(current, oldmask);
-
-out:
-       if (retval)
-               free_cache_attributes(cpu);
-       return retval;
-}
-
-#ifdef CONFIG_SYSFS
-
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-
-extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
-
-/* pointer to kobject for cpuX/cache */
-static struct kobject * cache_kobject[NR_CPUS];
-
-struct _index_kobject {
-       struct kobject kobj;
-       unsigned int cpu;
-       unsigned short index;
-};
-
-/* pointer to array of kobjects for cpuX/cache/indexY */
-static struct _index_kobject *index_kobject[NR_CPUS];
-#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))
-
-#define show_one_plus(file_name, object, val)                          \
-static ssize_t show_##file_name                                                \
-                       (struct _cpuid4_info *this_leaf, char *buf)     \
-{                                                                      \
-       return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
-}
-
-show_one_plus(level, eax.split.level, 0);
-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
-
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
-{
-       return sprintf (buf, "%luK\n", this_leaf->size / 1024);
-}
-
-static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
-{
-       char mask_str[NR_CPUS];
-       cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
-       return sprintf(buf, "%s\n", mask_str);
-}
-
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
-{
-       switch (this_leaf->eax.split.type) {
-       case CACHE_TYPE_DATA:
-               return sprintf(buf, "Data\n");
-       case CACHE_TYPE_INST:
-               return sprintf(buf, "Instruction\n");
-       case CACHE_TYPE_UNIFIED:
-               return sprintf(buf, "Unified\n");
-       default:
-               return sprintf(buf, "Unknown\n");
-       }
-}
-
-struct _cache_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct _cpuid4_info *, char *);
-       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
-#define define_one_ro(_name) \
-static struct _cache_attr _name = \
-       __ATTR(_name, 0444, show_##_name, NULL)
-
-define_one_ro(level);
-define_one_ro(type);
-define_one_ro(coherency_line_size);
-define_one_ro(physical_line_partition);
-define_one_ro(ways_of_associativity);
-define_one_ro(number_of_sets);
-define_one_ro(size);
-define_one_ro(shared_cpu_map);
-
-static struct attribute * default_attrs[] = {
-       &type.attr,
-       &level.attr,
-       &coherency_line_size.attr,
-       &physical_line_partition.attr,
-       &ways_of_associativity.attr,
-       &number_of_sets.attr,
-       &size.attr,
-       &shared_cpu_map.attr,
-       NULL
-};
-
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
-static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
-{
-       struct _cache_attr *fattr = to_attr(attr);
-       struct _index_kobject *this_leaf = to_object(kobj);
-       ssize_t ret;
-
-       ret = fattr->show ?
-               fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-                       buf) :
-               0;
-       return ret;
-}
-
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-                    const char * buf, size_t count)
-{
-       return 0;
-}
-
-static struct sysfs_ops sysfs_ops = {
-       .show   = show,
-       .store  = store,
-};
-
-static struct kobj_type ktype_cache = {
-       .sysfs_ops      = &sysfs_ops,
-       .default_attrs  = default_attrs,
-};
-
-static struct kobj_type ktype_percpu_entry = {
-       .sysfs_ops      = &sysfs_ops,
-};
-
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
-{
-       kfree(cache_kobject[cpu]);
-       kfree(index_kobject[cpu]);
-       cache_kobject[cpu] = NULL;
-       index_kobject[cpu] = NULL;
-       free_cache_attributes(cpu);
-}
-
-static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
-{
-       if (num_cache_leaves == 0)
-               return -ENOENT;
-
-       detect_cache_attributes(cpu);
-       if (cpuid4_info[cpu] == NULL)
-               return -ENOENT;
-
-       /* Allocate all required memory */
-       cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
-       if (unlikely(cache_kobject[cpu] == NULL))
-               goto err_out;
-
-       index_kobject[cpu] = kzalloc(
-           sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-       if (unlikely(index_kobject[cpu] == NULL))
-               goto err_out;
-
-       return 0;
-
-err_out:
-       cpuid4_cache_sysfs_exit(cpu);
-       return -ENOMEM;
-}
-
-/* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
-{
-       unsigned int cpu = sys_dev->id;
-       unsigned long i, j;
-       struct _index_kobject *this_object;
-       int retval = 0;
-
-       retval = cpuid4_cache_sysfs_init(cpu);
-       if (unlikely(retval < 0))
-               return retval;
-
-       cache_kobject[cpu]->parent = &sys_dev->kobj;
-       kobject_set_name(cache_kobject[cpu], "%s", "cache");
-       cache_kobject[cpu]->ktype = &ktype_percpu_entry;
-       retval = kobject_register(cache_kobject[cpu]);
-
-       for (i = 0; i < num_cache_leaves; i++) {
-               this_object = INDEX_KOBJECT_PTR(cpu,i);
-               this_object->cpu = cpu;
-               this_object->index = i;
-               this_object->kobj.parent = cache_kobject[cpu];
-               kobject_set_name(&(this_object->kobj), "index%1lu", i);
-               this_object->kobj.ktype = &ktype_cache;
-               retval = kobject_register(&(this_object->kobj));
-               if (unlikely(retval)) {
-                       for (j = 0; j < i; j++) {
-                               kobject_unregister(
-                                       &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
-                       }
-                       kobject_unregister(cache_kobject[cpu]);
-                       cpuid4_cache_sysfs_exit(cpu);
-                       break;
-               }
-       }
-       return retval;
-}
-
-static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
-{
-       unsigned int cpu = sys_dev->id;
-       unsigned long i;
-
-       if (cpuid4_info[cpu] == NULL)
-               return;
-       for (i = 0; i < num_cache_leaves; i++) {
-               cache_remove_shared_cpu_map(cpu, i);
-               kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-       }
-       kobject_unregister(cache_kobject[cpu]);
-       cpuid4_cache_sysfs_exit(cpu);
-       return;
-}
-
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
-                                       unsigned long action, void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct sys_device *sys_dev;
-
-       sys_dev = get_cpu_sysdev(cpu);
-       switch (action) {
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               cache_add_dev(sys_dev);
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               cache_remove_dev(sys_dev);
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
-{
-    .notifier_call = cacheinfo_cpu_callback,
-};
-
-static int __cpuinit cache_sysfs_init(void)
-{
-       int i;
-
-       if (num_cache_leaves == 0)
-               return 0;
-
-       register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
-       for_each_online_cpu(i) {
-               cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
-                       (void *)(long)i);
-       }
-
-       return 0;
-}
-
-device_initcall(cache_sysfs_init);
-
-#endif
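
The shared_cpu_map logic above groups logical CPUs by discarding the low APIC-ID bits that distinguish threads sharing a cache: index_msb is the count order of num_threads_sharing, and two CPUs share the leaf when their APIC IDs agree after shifting right by it. A minimal userspace sketch of that grouping, assuming hypothetical APIC IDs in place of the kernel's cpu_data array:

#include <stdio.h>

/* smallest k with 2^k >= n, like the kernel's get_count_order() */
static int count_order(unsigned int n)
{
        int k = 0;
        while ((1u << k) < n)
                k++;
        return k;
}

int main(void)
{
        /* hypothetical APIC IDs: four logical CPUs, two threads per core */
        unsigned int apicid[4] = { 0, 1, 2, 3 };
        unsigned int num_threads_sharing = 2;   /* CPUID.4 EAX[25:14] + 1 */
        int index_msb = count_order(num_threads_sharing);
        int i, j;

        /* CPUs whose shifted APIC IDs match share this cache leaf */
        for (i = 0; i < 4; i++)
                for (j = 0; j < 4; j++)
                        if (i != j && apicid[i] >> index_msb == apicid[j] >> index_msb)
                                printf("cpu%d shares with cpu%d\n", i, j);
        return 0;
}
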
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
deleted file mode 100644 (file)
index 961fbe1..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-
-#include "cpu.h"
-
-/*
- *     Detect a NexGen CPU running without BIOS hypercode new enough
- *     to have CPUID. (Thanks to Herbert Oppmann)
- */
-static int __cpuinit deep_magic_nexgen_probe(void)
-{
-       int ret;
-       
-       __asm__ __volatile__ (
-               "       movw    $0x5555, %%ax\n"
-               "       xorw    %%dx,%%dx\n"
-               "       movw    $2, %%cx\n"
-               "       divw    %%cx\n"
-               "       movl    $0, %%eax\n"
-               "       jnz     1f\n"
-               "       movl    $1, %%eax\n"
-               "1:\n" 
-               : "=a" (ret) : : "cx", "dx" );
-       return  ret;
-}
-
-static void __cpuinit init_nexgen(struct cpuinfo_x86 * c)
-{
-       c->x86_cache_size = 256; /* A few had 1 MB... */
-}
-
-static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c)
-{
-       /* Detect NexGen with old hypercode */
-       if (deep_magic_nexgen_probe()) {
-               strcpy(c->x86_vendor_id, "NexGenDriven");
-       }
-}
-
-static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Nexgen",
-       .c_ident        = { "NexGenDriven" },
-       .c_models = {
-                       { .vendor = X86_VENDOR_NEXGEN,
-                         .family = 5,
-                         .model_names = { [1] = "Nx586" }
-                       },
-       },
-       .c_init         = init_nexgen,
-       .c_identify     = nexgen_identify,
-};
-
-int __init nexgen_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
-       return 0;
-}
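
nexgen.c follows the same registration pattern as the other vendor files: fill a struct cpu_dev with callbacks, then plant it in the cpu_devs[] array through which the common code dispatches by vendor. A self-contained sketch of that pattern with simplified stand-in types (the real struct carries more callbacks plus model tables):

#include <stdio.h>

/* simplified stand-ins for the kernel's cpu_dev / cpu_devs[] registry */
struct cpu_dev {
        const char *c_vendor;
        void (*c_init)(void);
};

enum { VENDOR_NEXGEN, VENDOR_NUM };

static struct cpu_dev *cpu_devs[VENDOR_NUM];

static void init_nexgen_stub(void)
{
        printf("initializing %s\n", "Nexgen");
}

static struct cpu_dev nexgen_dev = {
        .c_vendor = "Nexgen",
        .c_init   = init_nexgen_stub,
};

int main(void)
{
        /* registration, as nexgen_init_cpu() does at boot */
        cpu_devs[VENDOR_NEXGEN] = &nexgen_dev;

        /* identification code later dispatches through the table */
        if (cpu_devs[VENDOR_NEXGEN] && cpu_devs[VENDOR_NEXGEN]->c_init)
                cpu_devs[VENDOR_NEXGEN]->c_init();
        return 0;
}
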
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
deleted file mode 100644 (file)
index 93fecd4..0000000
+++ /dev/null
@@ -1,713 +0,0 @@
-/* local apic based NMI watchdog for various CPUs.
-   This file also handles reservation of performance counters for coordination
-   with other users (like oprofile).
-
-   Note that these events normally don't tick when the CPU idles. This means
-   the frequency varies with CPU load.
-
-   Original code for K7/P6 written by Keith Owens */
-
-#include <linux/percpu.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/smp.h>
-#include <linux/nmi.h>
-#include <asm/apic.h>
-#include <asm/intel_arch_perfmon.h>
-
-struct nmi_watchdog_ctlblk {
-       unsigned int cccr_msr;
-       unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
-       unsigned int evntsel_msr;  /* the MSR to select the events to handle */
-};
-
-/* Interface defining a CPU specific perfctr watchdog */
-struct wd_ops {
-       int (*reserve)(void);
-       void (*unreserve)(void);
-       int (*setup)(unsigned nmi_hz);
-       void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
-       void (*stop)(void);
-       unsigned perfctr;
-       unsigned evntsel;
-       u64 checkbit;
-};
-
-static struct wd_ops *wd_ops;
-
-/* This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
- * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now).
- */
-#define NMI_MAX_COUNTER_BITS 66
-
-/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
- * evntsel_nmi_owner tracks the ownership of the event selection MSRs.
- * Different performance counters / event selects may be reserved by
- * different subsystems; this reservation system just tries to coordinate
- * things a little.
- */
-static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
-static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
-
-static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
-
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
-{
-       /* returns the bit offset of the performance counter register */
-       switch (boot_cpu_data.x86_vendor) {
-       case X86_VENDOR_AMD:
-               return (msr - MSR_K7_PERFCTR0);
-       case X86_VENDOR_INTEL:
-               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-                       return (msr - MSR_ARCH_PERFMON_PERFCTR0);
-
-               switch (boot_cpu_data.x86) {
-               case 6:
-                       return (msr - MSR_P6_PERFCTR0);
-               case 15:
-                       return (msr - MSR_P4_BPU_PERFCTR0);
-               }
-       }
-       return 0;
-}
-
-/* converts an msr to an appropriate reservation bit */
-static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
-{
-       /* returns the bit offset of the event selection register */
-       switch (boot_cpu_data.x86_vendor) {
-       case X86_VENDOR_AMD:
-               return (msr - MSR_K7_EVNTSEL0);
-       case X86_VENDOR_INTEL:
-               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-                       return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
-
-               switch (boot_cpu_data.x86) {
-               case 6:
-                       return (msr - MSR_P6_EVNTSEL0);
-               case 15:
-                       return (msr - MSR_P4_BSU_ESCR0);
-               }
-       }
-       return 0;
-
-}
-
-/* checks for a bit availability (hack for oprofile) */
-int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
-{
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       return (!test_bit(counter, perfctr_nmi_owner));
-}
-
-/* checks an msr for availability */
-int avail_to_resrv_perfctr_nmi(unsigned int msr)
-{
-       unsigned int counter;
-
-       counter = nmi_perfctr_msr_to_bit(msr);
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       return (!test_bit(counter, perfctr_nmi_owner));
-}
-
-int reserve_perfctr_nmi(unsigned int msr)
-{
-       unsigned int counter;
-
-       counter = nmi_perfctr_msr_to_bit(msr);
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       if (!test_and_set_bit(counter, perfctr_nmi_owner))
-               return 1;
-       return 0;
-}
-
-void release_perfctr_nmi(unsigned int msr)
-{
-       unsigned int counter;
-
-       counter = nmi_perfctr_msr_to_bit(msr);
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       clear_bit(counter, perfctr_nmi_owner);
-}
-
-int reserve_evntsel_nmi(unsigned int msr)
-{
-       unsigned int counter;
-
-       counter = nmi_evntsel_msr_to_bit(msr);
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       if (!test_and_set_bit(counter, evntsel_nmi_owner))
-               return 1;
-       return 0;
-}
-
-void release_evntsel_nmi(unsigned int msr)
-{
-       unsigned int counter;
-
-       counter = nmi_evntsel_msr_to_bit(msr);
-       BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
-       clear_bit(counter, evntsel_nmi_owner);
-}
-
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
-EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
-EXPORT_SYMBOL(reserve_perfctr_nmi);
-EXPORT_SYMBOL(release_perfctr_nmi);
-EXPORT_SYMBOL(reserve_evntsel_nmi);
-EXPORT_SYMBOL(release_evntsel_nmi);
-
-void disable_lapic_nmi_watchdog(void)
-{
-       BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-       if (atomic_read(&nmi_active) <= 0)
-               return;
-
-       on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-       wd_ops->unreserve();
-
-       BUG_ON(atomic_read(&nmi_active) != 0);
-}
-
-void enable_lapic_nmi_watchdog(void)
-{
-       BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
-
-       /* are we already enabled? */
-       if (atomic_read(&nmi_active) != 0)
-               return;
-
-       /* are we LAPIC-aware? */
-       if (!wd_ops)
-               return;
-       if (!wd_ops->reserve()) {
-               printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
-               return;
-       }
-
-       on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
-       touch_nmi_watchdog();
-}
-
-/*
- * Activate the NMI watchdog via the local APIC.
- */
-
-static unsigned int adjust_for_32bit_ctr(unsigned int hz)
-{
-       u64 counter_val;
-       unsigned int retval = hz;
-
-       /*
-        * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
-        * are writable, with higher bits sign-extending from bit 31.
-        * So we can only program the counter with 31-bit values; bit 32
-        * must be 1 so that bits 33..63 sign-extend to 1.
-        * Find an nmi_hz whose reload value fits in 31 bits.
-        */
-       counter_val = (u64)cpu_khz * 1000;
-       do_div(counter_val, retval);
-       if (counter_val > 0x7fffffffULL) {
-               u64 count = (u64)cpu_khz * 1000;
-               do_div(count, 0x7fffffffUL);
-               retval = count + 1;
-       }
-       return retval;
-}
-
-static void
-write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
-{
-       u64 count = (u64)cpu_khz * 1000;
-
-       do_div(count, nmi_hz);
-       if (descr)
-               Dprintk("setting %s to -0x%08Lx\n", descr, count);
-       wrmsrl(perfctr_msr, 0 - count);
-}
-
-static void write_watchdog_counter32(unsigned int perfctr_msr,
-               const char *descr, unsigned nmi_hz)
-{
-       u64 count = (u64)cpu_khz * 1000;
-
-       do_div(count, nmi_hz);
-       if (descr)
-               Dprintk("setting %s to -0x%08Lx\n", descr, count);
-       wrmsr(perfctr_msr, (u32)(-count), 0);
-}
-
-/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
-   nicely stable so there is not much variety */
-
-#define K7_EVNTSEL_ENABLE      (1 << 22)
-#define K7_EVNTSEL_INT         (1 << 20)
-#define K7_EVNTSEL_OS          (1 << 17)
-#define K7_EVNTSEL_USR         (1 << 16)
-#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING   0x76
-#define K7_NMI_EVENT           K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
-
-static int setup_k7_watchdog(unsigned nmi_hz)
-{
-       unsigned int perfctr_msr, evntsel_msr;
-       unsigned int evntsel;
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-       perfctr_msr = wd_ops->perfctr;
-       evntsel_msr = wd_ops->evntsel;
-
-       wrmsrl(perfctr_msr, 0UL);
-
-       evntsel = K7_EVNTSEL_INT
-               | K7_EVNTSEL_OS
-               | K7_EVNTSEL_USR
-               | K7_NMI_EVENT;
-
-       /* setup the timer */
-       wrmsr(evntsel_msr, evntsel, 0);
-       write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= K7_EVNTSEL_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
-
-       wd->perfctr_msr = perfctr_msr;
-       wd->evntsel_msr = evntsel_msr;
-       wd->cccr_msr = 0;  /* unused */
-       return 1;
-}
-
-static void single_msr_stop_watchdog(void)
-{
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-       wrmsr(wd->evntsel_msr, 0, 0);
-}
-
-static int single_msr_reserve(void)
-{
-       if (!reserve_perfctr_nmi(wd_ops->perfctr))
-               return 0;
-
-       if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
-               release_perfctr_nmi(wd_ops->perfctr);
-               return 0;
-       }
-       return 1;
-}
-
-static void single_msr_unreserve(void)
-{
-       release_evntsel_nmi(wd_ops->evntsel);
-       release_perfctr_nmi(wd_ops->perfctr);
-}
-
-static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
-{
-       /* start the cycle over again */
-       write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
-}
-
-static struct wd_ops k7_wd_ops = {
-       .reserve = single_msr_reserve,
-       .unreserve = single_msr_unreserve,
-       .setup = setup_k7_watchdog,
-       .rearm = single_msr_rearm,
-       .stop = single_msr_stop_watchdog,
-       .perfctr = MSR_K7_PERFCTR0,
-       .evntsel = MSR_K7_EVNTSEL0,
-       .checkbit = 1ULL<<47,
-};
-
-/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
-
-#define P6_EVNTSEL0_ENABLE     (1 << 22)
-#define P6_EVNTSEL_INT         (1 << 20)
-#define P6_EVNTSEL_OS          (1 << 17)
-#define P6_EVNTSEL_USR         (1 << 16)
-#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
-#define P6_NMI_EVENT           P6_EVENT_CPU_CLOCKS_NOT_HALTED
-
-static int setup_p6_watchdog(unsigned nmi_hz)
-{
-       unsigned int perfctr_msr, evntsel_msr;
-       unsigned int evntsel;
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-       perfctr_msr = wd_ops->perfctr;
-       evntsel_msr = wd_ops->evntsel;
-
-       /* KVM doesn't implement this MSR */
-       if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
-               return 0;
-
-       evntsel = P6_EVNTSEL_INT
-               | P6_EVNTSEL_OS
-               | P6_EVNTSEL_USR
-               | P6_NMI_EVENT;
-
-       /* setup the timer */
-       wrmsr(evntsel_msr, evntsel, 0);
-       nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-       write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= P6_EVNTSEL0_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
-
-       wd->perfctr_msr = perfctr_msr;
-       wd->evntsel_msr = evntsel_msr;
-       wd->cccr_msr = 0;  /* unused */
-       return 1;
-}
-
-static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
-{
-       /* P6-based Pentium M needs to re-unmask
-        * the APIC vector, but it doesn't hurt
-        * other P6 variants.
-        * ArchPerfmon/Core Duo also needs this. */
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /* P6/ARCH_PERFMON has 32 bit counter write */
-       write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
-}
-
-static struct wd_ops p6_wd_ops = {
-       .reserve = single_msr_reserve,
-       .unreserve = single_msr_unreserve,
-       .setup = setup_p6_watchdog,
-       .rearm = p6_rearm,
-       .stop = single_msr_stop_watchdog,
-       .perfctr = MSR_P6_PERFCTR0,
-       .evntsel = MSR_P6_EVNTSEL0,
-       .checkbit = 1ULL<<39,
-};
-
-/* Intel P4 performance counters. By far the most complicated of all. */
-
-#define MSR_P4_MISC_ENABLE_PERF_AVAIL  (1<<7)
-#define P4_ESCR_EVENT_SELECT(N)        ((N)<<25)
-#define P4_ESCR_OS             (1<<3)
-#define P4_ESCR_USR            (1<<2)
-#define P4_CCCR_OVF_PMI0       (1<<26)
-#define P4_CCCR_OVF_PMI1       (1<<27)
-#define P4_CCCR_THRESHOLD(N)   ((N)<<20)
-#define P4_CCCR_COMPLEMENT     (1<<19)
-#define P4_CCCR_COMPARE                (1<<18)
-#define P4_CCCR_REQUIRED       (3<<16)
-#define P4_CCCR_ESCR_SELECT(N) ((N)<<13)
-#define P4_CCCR_ENABLE         (1<<12)
-#define P4_CCCR_OVF            (1<<31)
-
-/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
-   CRU_ESCR0 (with any non-null event selector) through a complemented
-   max threshold. [IA32-Vol3, Section 14.9.9] */
-
-static int setup_p4_watchdog(unsigned nmi_hz)
-{
-       unsigned int perfctr_msr, evntsel_msr, cccr_msr;
-       unsigned int evntsel, cccr_val;
-       unsigned int misc_enable, dummy;
-       unsigned int ht_num;
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-       rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
-       if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
-               return 0;
-
-#ifdef CONFIG_SMP
-       /* detect which hyperthread we are on */
-       if (smp_num_siblings == 2) {
-               unsigned int ebx, apicid;
-
-               ebx = cpuid_ebx(1);
-               apicid = (ebx >> 24) & 0xff;
-               ht_num = apicid & 1;
-       } else
-#endif
-               ht_num = 0;
-
-       /* Performance counters are shared resources;
-        * assign each hyperthread its own set.
-        * (Re-using the ESCR0 register seems safe
-        * and keeps the cccr_val the same.)
-        */
-       if (!ht_num) {
-               /* logical cpu 0 */
-               perfctr_msr = MSR_P4_IQ_PERFCTR0;
-               evntsel_msr = MSR_P4_CRU_ESCR0;
-               cccr_msr = MSR_P4_IQ_CCCR0;
-               cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
-       } else {
-               /* logical cpu 1 */
-               perfctr_msr = MSR_P4_IQ_PERFCTR1;
-               evntsel_msr = MSR_P4_CRU_ESCR0;
-               cccr_msr = MSR_P4_IQ_CCCR1;
-               cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
-       }
-
-       evntsel = P4_ESCR_EVENT_SELECT(0x3F)
-               | P4_ESCR_OS
-               | P4_ESCR_USR;
-
-       cccr_val |= P4_CCCR_THRESHOLD(15)
-                | P4_CCCR_COMPLEMENT
-                | P4_CCCR_COMPARE
-                | P4_CCCR_REQUIRED;
-
-       wrmsr(evntsel_msr, evntsel, 0);
-       wrmsr(cccr_msr, cccr_val, 0);
-       write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       cccr_val |= P4_CCCR_ENABLE;
-       wrmsr(cccr_msr, cccr_val, 0);
-       wd->perfctr_msr = perfctr_msr;
-       wd->evntsel_msr = evntsel_msr;
-       wd->cccr_msr = cccr_msr;
-       return 1;
-}
-
-static void stop_p4_watchdog(void)
-{
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-       wrmsr(wd->cccr_msr, 0, 0);
-       wrmsr(wd->evntsel_msr, 0, 0);
-}
-
-static int p4_reserve(void)
-{
-       if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
-               return 0;
-#ifdef CONFIG_SMP
-       if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
-               goto fail1;
-#endif
-       if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
-               goto fail2;
-       /* RED-PEN why is ESCR1 not reserved here? */
-       return 1;
- fail2:
-#ifdef CONFIG_SMP
-       if (smp_num_siblings > 1)
-               release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
- fail1:
-#endif
-       release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
-       return 0;
-}
-
-static void p4_unreserve(void)
-{
-#ifdef CONFIG_SMP
-       if (smp_num_siblings > 1)
-               release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
-#endif
-       release_evntsel_nmi(MSR_P4_CRU_ESCR0);
-       release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
-}
-
-static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
-{
-       unsigned dummy;
-       /*
-        * P4 quirks:
-        * - An overflown perfctr will assert its interrupt
-        *   until the OVF flag in its CCCR is cleared.
-        * - LVTPC is masked on interrupt and must be
-        *   unmasked by the LVTPC handler.
-        */
-       rdmsrl(wd->cccr_msr, dummy);
-       dummy &= ~P4_CCCR_OVF;
-       wrmsrl(wd->cccr_msr, dummy);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /* start the cycle over again */
-       write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
-}
-
-static struct wd_ops p4_wd_ops = {
-       .reserve = p4_reserve,
-       .unreserve = p4_unreserve,
-       .setup = setup_p4_watchdog,
-       .rearm = p4_rearm,
-       .stop = stop_p4_watchdog,
-       /* RED-PEN this is wrong for the other sibling */
-       .perfctr = MSR_P4_BPU_PERFCTR0,
-       .evntsel = MSR_P4_BSU_ESCR0,
-       .checkbit = 1ULL<<39,
-};
-
-/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
-   all future Intel CPUs. */
-
-#define ARCH_PERFMON_NMI_EVENT_SEL     ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK   ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
-static int setup_intel_arch_watchdog(unsigned nmi_hz)
-{
-       unsigned int ebx;
-       union cpuid10_eax eax;
-       unsigned int unused;
-       unsigned int perfctr_msr, evntsel_msr;
-       unsigned int evntsel;
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-
-       /*
-        * Check whether the Architectural PerfMon supports
-        * the Unhalted Core Cycles event.
-        * NOTE: a corresponding bit of 0 in EBX indicates the event is present.
-        */
-       cpuid(10, &(eax.full), &ebx, &unused, &unused);
-       if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
-           (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-               return 0;
-
-       perfctr_msr = wd_ops->perfctr;
-       evntsel_msr = wd_ops->evntsel;
-
-       wrmsrl(perfctr_msr, 0UL);
-
-       evntsel = ARCH_PERFMON_EVENTSEL_INT
-               | ARCH_PERFMON_EVENTSEL_OS
-               | ARCH_PERFMON_EVENTSEL_USR
-               | ARCH_PERFMON_NMI_EVENT_SEL
-               | ARCH_PERFMON_NMI_EVENT_UMASK;
-
-       /* setup the timer */
-       wrmsr(evntsel_msr, evntsel, 0);
-       nmi_hz = adjust_for_32bit_ctr(nmi_hz);
-       write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
-
-       wd->perfctr_msr = perfctr_msr;
-       wd->evntsel_msr = evntsel_msr;
-       wd->cccr_msr = 0;  /* unused */
-       wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
-       return 1;
-}
-
-static struct wd_ops intel_arch_wd_ops = {
-       .reserve = single_msr_reserve,
-       .unreserve = single_msr_unreserve,
-       .setup = setup_intel_arch_watchdog,
-       .rearm = p6_rearm,
-       .stop = single_msr_stop_watchdog,
-       .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
-       .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
-};
-
-static struct wd_ops coreduo_wd_ops = {
-       .reserve = single_msr_reserve,
-       .unreserve = single_msr_unreserve,
-       .setup = setup_intel_arch_watchdog,
-       .rearm = p6_rearm,
-       .stop = single_msr_stop_watchdog,
-       .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
-       .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
-};
-
-static void probe_nmi_watchdog(void)
-{
-       switch (boot_cpu_data.x86_vendor) {
-       case X86_VENDOR_AMD:
-               if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
-                   boot_cpu_data.x86 != 16)
-                       return;
-               wd_ops = &k7_wd_ops;
-               break;
-       case X86_VENDOR_INTEL:
-               /* Work around Core Duo (Yonah) errata AE49 where perfctr1
-                  doesn't have a working enable bit. */
-               if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
-                       wd_ops = &coreduo_wd_ops;
-                       break;
-               }
-               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                       wd_ops = &intel_arch_wd_ops;
-                       break;
-               }
-               switch (boot_cpu_data.x86) {
-               case 6:
-                       if (boot_cpu_data.x86_model > 0xd)
-                               return;
-
-                       wd_ops = &p6_wd_ops;
-                       break;
-               case 15:
-                       if (boot_cpu_data.x86_model > 0x4)
-                               return;
-
-                       wd_ops = &p4_wd_ops;
-                       break;
-               default:
-                       return;
-               }
-               break;
-       }
-}
-
-/* Interface to nmi.c */
-
-int lapic_watchdog_init(unsigned nmi_hz)
-{
-       if (!wd_ops) {
-               probe_nmi_watchdog();
-               if (!wd_ops)
-                       return -1;
-
-               if (!wd_ops->reserve()) {
-                       printk(KERN_ERR
-                               "NMI watchdog: cannot reserve perfctrs\n");
-                       return -1;
-               }
-       }
-
-       if (!(wd_ops->setup(nmi_hz))) {
-               printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
-                      raw_smp_processor_id());
-               return -1;
-       }
-
-       return 0;
-}
-
-void lapic_watchdog_stop(void)
-{
-       if (wd_ops)
-               wd_ops->stop();
-}
-
-unsigned lapic_adjust_nmi_hz(unsigned hz)
-{
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-       if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
-           wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
-               hz = adjust_for_32bit_ctr(hz);
-       return hz;
-}
-
-int lapic_wd_event(unsigned nmi_hz)
-{
-       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
-       u64 ctr;
-       rdmsrl(wd->perfctr_msr, ctr);
-       if (ctr & wd_ops->checkbit) { /* perfctr still running? */
-               return 0;
-       }
-       wd_ops->rearm(wd, nmi_hz);
-       return 1;
-}
-
-int lapic_watchdog_ok(void)
-{
-       return wd_ops != NULL;
-}
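
The watchdog arms a performance counter by programming it with the negative of cpu_khz * 1000 / nmi_hz, so the counter overflows — and raises the NMI — nmi_hz times per second; adjust_for_32bit_ctr() bumps nmi_hz up until that reload value fits in the 31 writable magnitude bits of a P6/ARCH_PERFMON counter. A worked userspace version of the arithmetic, assuming an illustrative 3 GHz cpu_khz:

#include <stdio.h>
#include <stdint.h>

/* mirrors adjust_for_32bit_ctr(): raise hz until the reload fits in 31 bits */
static unsigned adjust_for_32bit_ctr(uint64_t cpu_khz, unsigned hz)
{
        if (cpu_khz * 1000 / hz > 0x7fffffffULL)
                hz = cpu_khz * 1000 / 0x7fffffffULL + 1;
        return hz;
}

int main(void)
{
        uint64_t cpu_khz = 3000000;     /* assumed 3 GHz clock */
        unsigned nmi_hz = 1;            /* desired watchdog rate */
        uint64_t count;

        nmi_hz = adjust_for_32bit_ctr(cpu_khz, nmi_hz); /* becomes 2 here */

        /* write -(cycles per tick): the counter overflows after that many
         * cycles, i.e. nmi_hz times per second */
        count = cpu_khz * 1000 / nmi_hz;
        printf("nmi_hz=%u, reload=0x%016llx\n",
               nmi_hz, (unsigned long long)(0 - count));
        return 0;
}
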
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
deleted file mode 100644 (file)
index 1e31b6c..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-#include <linux/smp.h>
-#include <linux/timex.h>
-#include <linux/string.h>
-#include <asm/semaphore.h>
-#include <linux/seq_file.h>
-#include <linux/cpufreq.h>
-
-/*
- *     Get CPU information for use by the procfs.
- */
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
-       /* 
-        * These flag bits must match the definitions in <asm/cpufeature.h>.
-        * NULL means this bit is undefined or reserved; either way it doesn't
-        * have meaning as far as Linux is concerned.  Note that it's important
-        * to realize there is a difference between this table and CPUID -- if
-        * applications want to get the raw CPUID data, they should access
-        * /dev/cpu/<cpu_nr>/cpuid instead.
-        */
-       static const char * const x86_cap_flags[] = {
-               /* Intel-defined */
-               "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-               "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-               "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-               "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
-
-               /* AMD-defined */
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
-               NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
-               "3dnowext", "3dnow",
-
-               /* Transmeta-defined */
-               "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-               /* Other (Linux-defined) */
-               "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
-               NULL, NULL, NULL, NULL,
-               "constant_tsc", "up", NULL, "arch_perfmon",
-               "pebs", "bts", NULL, "sync_rdtsc",
-               "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-               /* Intel-defined (#2) */
-               "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
-               "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
-               NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-               /* VIA/Cyrix/Centaur-defined */
-               NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-               "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-               /* AMD-defined (#2) */
-               "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
-               "altmovcr8", "abm", "sse4a",
-               "misalignsse", "3dnowprefetch",
-               "osvw", "ibs", NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-
-               /* Auxiliary (Linux-defined) */
-               "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-       };
-       static const char * const x86_power_flags[] = {
-               "ts",   /* temperature sensor */
-               "fid",  /* frequency id control */
-               "vid",  /* voltage id control */
-               "ttp",  /* thermal trip */
-               "tm",
-               "stc",
-               "100mhzsteps",
-               "hwpstate",
-               "",     /* constant_tsc - moved to flags */
-               /* nothing */
-       };
-       struct cpuinfo_x86 *c = v;
-       int i, n = c - cpu_data;
-       int fpu_exception;
-
-#ifdef CONFIG_SMP
-       if (!cpu_online(n))
-               return 0;
-#endif
-       seq_printf(m, "processor\t: %d\n"
-               "vendor_id\t: %s\n"
-               "cpu family\t: %d\n"
-               "model\t\t: %d\n"
-               "model name\t: %s\n",
-               n,
-               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-               c->x86,
-               c->x86_model,
-               c->x86_model_id[0] ? c->x86_model_id : "unknown");
-
-       if (c->x86_mask || c->cpuid_level >= 0)
-               seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-       else
-               seq_printf(m, "stepping\t: unknown\n");
-
-       if (cpu_has(c, X86_FEATURE_TSC)) {
-               unsigned int freq = cpufreq_quick_get(n);
-               if (!freq)
-                       freq = cpu_khz;
-               seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-                       freq / 1000, (freq % 1000));
-       }
-
-       /* Cache size */
-       if (c->x86_cache_size >= 0)
-               seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-#ifdef CONFIG_X86_HT
-       if (c->x86_max_cores * smp_num_siblings > 1) {
-               seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
-               seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-       }
-#endif
-       
-       /* We use exception 16 if we have hardware math and we've either
-        * seen it or the CPU claims it is internal */
-       fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
-       seq_printf(m, "fdiv_bug\t: %s\n"
-                       "hlt_bug\t\t: %s\n"
-                       "f00f_bug\t: %s\n"
-                       "coma_bug\t: %s\n"
-                       "fpu\t\t: %s\n"
-                       "fpu_exception\t: %s\n"
-                       "cpuid level\t: %d\n"
-                       "wp\t\t: %s\n"
-                       "flags\t\t:",
-                    c->fdiv_bug ? "yes" : "no",
-                    c->hlt_works_ok ? "no" : "yes",
-                    c->f00f_bug ? "yes" : "no",
-                    c->coma_bug ? "yes" : "no",
-                    c->hard_math ? "yes" : "no",
-                    fpu_exception ? "yes" : "no",
-                    c->cpuid_level,
-                    c->wp_works_ok ? "yes" : "no");
-
-       for (i = 0; i < 32*NCAPINTS; i++)
-               if (test_bit(i, c->x86_capability) &&
-                   x86_cap_flags[i] != NULL)
-                       seq_printf(m, " %s", x86_cap_flags[i]);
-
-       for (i = 0; i < 32; i++)
-               if (c->x86_power & (1 << i)) {
-                       if (i < ARRAY_SIZE(x86_power_flags) &&
-                           x86_power_flags[i])
-                               seq_printf(m, "%s%s",
-                                          x86_power_flags[i][0]?" ":"",
-                                          x86_power_flags[i]);
-                       else
-                               seq_printf(m, " [%d]", i);
-               }
-
-       seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-                    c->loops_per_jiffy/(500000/HZ),
-                    (c->loops_per_jiffy/(5000/HZ)) % 100);
-       seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
-
-       return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-struct seq_operations cpuinfo_op = {
-       .start  = c_start,
-       .next   = c_next,
-       .stop   = c_stop,
-       .show   = show_cpuinfo,
-};
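
The flags line in /proc/cpuinfo is produced by a table-driven scan: every set bit in x86_capability whose x86_cap_flags[] entry is non-NULL contributes a name, and NULL entries silently skip reserved bits. The same loop in plain C, over a hypothetical feature word:

#include <stdio.h>

int main(void)
{
        /* first 16 Intel-defined flag names; NULL marks a reserved bit */
        static const char * const cap_flags[] = {
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        };
        unsigned int caps = 0x0000f9ff; /* assumed CPUID.1 EDX bits */
        unsigned int i;

        printf("flags\t\t:");
        for (i = 0; i < sizeof(cap_flags) / sizeof(cap_flags[0]); i++)
                if ((caps & (1u << i)) && cap_flags[i] != NULL)
                        printf(" %s", cap_flags[i]);
        printf("\n");
        return 0;
}
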
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
deleted file mode 100644 (file)
index 200fb3f..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include "cpu.h"
-
-static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
-{
-       unsigned int cap_mask, uk, max, dummy;
-       unsigned int cms_rev1, cms_rev2;
-       unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
-       char cpu_info[65];
-
-       get_model_name(c);      /* Same as AMD/Cyrix */
-       display_cacheinfo(c);
-
-       /* Print CMS and CPU revision */
-       max = cpuid_eax(0x80860000);
-       cpu_rev = 0;
-       if (max >= 0x80860001) {
-               cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
-               if (cpu_rev != 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
-                               (cpu_rev >> 24) & 0xff,
-                               (cpu_rev >> 16) & 0xff,
-                               (cpu_rev >> 8) & 0xff,
-                               cpu_rev & 0xff,
-                               cpu_freq);
-               }
-       }
-       if (max >= 0x80860002) {
-               cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
-               if (cpu_rev == 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
-                               new_cpu_rev, cpu_freq);
-               }
-               printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
-                      (cms_rev1 >> 24) & 0xff,
-                      (cms_rev1 >> 16) & 0xff,
-                      (cms_rev1 >> 8) & 0xff,
-                      cms_rev1 & 0xff,
-                      cms_rev2);
-       }
-       if (max >= 0x80860006) {
-               cpuid(0x80860003,
-                     (void *)&cpu_info[0],
-                     (void *)&cpu_info[4],
-                     (void *)&cpu_info[8],
-                     (void *)&cpu_info[12]);
-               cpuid(0x80860004,
-                     (void *)&cpu_info[16],
-                     (void *)&cpu_info[20],
-                     (void *)&cpu_info[24],
-                     (void *)&cpu_info[28]);
-               cpuid(0x80860005,
-                     (void *)&cpu_info[32],
-                     (void *)&cpu_info[36],
-                     (void *)&cpu_info[40],
-                     (void *)&cpu_info[44]);
-               cpuid(0x80860006,
-                     (void *)&cpu_info[48],
-                     (void *)&cpu_info[52],
-                     (void *)&cpu_info[56],
-                     (void *)&cpu_info[60]);
-               cpu_info[64] = '\0';
-               printk(KERN_INFO "CPU: %s\n", cpu_info);
-       }
-
-       /* Unhide possibly hidden capability flags */
-       rdmsr(0x80860004, cap_mask, uk);
-       wrmsr(0x80860004, ~0, uk);
-       c->x86_capability[0] = cpuid_edx(0x00000001);
-       wrmsr(0x80860004, cap_mask, uk);
-
-       /* All Transmeta CPUs have a constant TSC */
-       set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-       
-       /* If we can run i686 user-space code, call us an i686 */
-#define USER686 ((1 << X86_FEATURE_TSC)|\
-                (1 << X86_FEATURE_CX8)|\
-                (1 << X86_FEATURE_CMOV))
-        if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
-               c->x86 = 6;
-
-#ifdef CONFIG_SYSCTL
-       /* randomize_va_space slows us down enormously;
-          it probably triggers retranslation of x86->native bytecode */
-       randomize_va_space = 0;
-#endif
-}
-
-static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c)
-{
-       u32 xlvl;
-
-       /* Transmeta-defined flags: level 0x80860001 */
-       xlvl = cpuid_eax(0x80860000);
-       if ((xlvl & 0xffff0000) == 0x80860000) {
-               if (xlvl >= 0x80860001)
-                       c->x86_capability[2] = cpuid_edx(0x80860001);
-       }
-}
-
-static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
-       .c_vendor       = "Transmeta",
-       .c_ident        = { "GenuineTMx86", "TransmetaCPU" },
-       .c_init         = init_transmeta,
-       .c_identify     = transmeta_identify,
-};
-
-int __init transmeta_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
-       return 0;
-}
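
init_transmeta() assembles its 64-character processor information string by storing EAX..EDX of CPUID leaves 0x80860003 through 0x80860006 straight into a character buffer. A userspace sketch of the same trick using GCC's <cpuid.h>; on non-Transmeta parts these leaves return nothing useful and the string stays empty:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>      /* GCC/Clang wrapper for the CPUID instruction */

int main(void)
{
        unsigned int regs[16] = { 0 };
        unsigned int leaf, i = 0;
        char info[65];

        /* each leaf yields 16 bytes of string data in EAX, EBX, ECX, EDX */
        for (leaf = 0x80860003; leaf <= 0x80860006; leaf++, i += 4)
                __get_cpuid(leaf, &regs[i], &regs[i + 1],
                            &regs[i + 2], &regs[i + 3]);

        memcpy(info, regs, 64);
        info[64] = '\0';
        printf("CPU: %s\n", info);
        return 0;
}
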
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
deleted file mode 100644 (file)
index a7a4e75..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/processor.h>
-#include "cpu.h"
-
-/* UMC chips appear to be only 386 or 486, so no special init takes place.
- */
-
-static struct cpu_dev umc_cpu_dev __cpuinitdata = {
-       .c_vendor       = "UMC",
-       .c_ident        = { "UMC UMC UMC" },
-       .c_models = {
-               { .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
-                 { 
-                         [1] = "U5D", 
-                         [2] = "U5S", 
-                 }
-               },
-       },
-};
-
-int __init umc_init_cpu(void)
-{
-       cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
-       return 0;
-}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
new file mode 100644 (file)
index 0000000..6687f6d
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Makefile for x86-compatible CPU details and quirks
+#
+
+obj-y  :=      common.o proc.o bugs.o
+
+obj-y  +=      amd.o
+obj-y  +=      cyrix.o
+obj-y  +=      centaur.o
+obj-y  +=      transmeta.o
+obj-y  +=      intel.o intel_cacheinfo.o addon_cpuid_features.o
+obj-y  +=      nexgen.o
+obj-y  +=      umc.o
+
+obj-$(CONFIG_X86_MCE)  +=      ../../../x86/kernel/cpu/mcheck/
+
+obj-$(CONFIG_MTRR)     +=      ../../../x86/kernel/cpu/mtrr/
+obj-$(CONFIG_CPU_FREQ) +=      ../../../x86/kernel/cpu/cpufreq/
+
+obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
new file mode 100644 (file)
index 0000000..3e91d3e
--- /dev/null
@@ -0,0 +1,50 @@
+
+/*
+ *     Routines to identify additional CPU features that are scattered in
+ *     cpuid space.
+ */
+
+#include <linux/cpu.h>
+
+#include <asm/processor.h>
+
+struct cpuid_bit {
+       u16 feature;
+       u8 reg;
+       u8 bit;
+       u32 level;
+};
+
+enum cpuid_regs {
+       CR_EAX = 0,
+       CR_ECX,
+       CR_EDX,
+       CR_EBX
+};
+
+void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+{
+       u32 max_level;
+       u32 regs[4];
+       const struct cpuid_bit *cb;
+
+       static const struct cpuid_bit cpuid_bits[] = {
+               { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
+               { 0, 0, 0, 0 }
+       };
+
+       for (cb = cpuid_bits; cb->feature; cb++) {
+
+               /* Verify that the level is valid */
+               max_level = cpuid_eax(cb->level & 0xffff0000);
+               if (max_level < cb->level ||
+                   max_level > (cb->level | 0xffff))
+                       continue;
+
+               cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
+                       &regs[CR_ECX], &regs[CR_EDX]);
+
+               if (regs[cb->reg] & (1 << cb->bit))
+                       set_bit(cb->feature, c->x86_capability);
+       }
+}
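
Before trusting a scattered leaf, the loop above asks the leaf's own range: cpuid_eax(level & 0xffff0000) reports the highest implemented leaf in that range, and the table entry is skipped unless the requested level falls inside it. A compact userspace rendering of the same check, again via <cpuid.h>, with the IDA bit position taken from the table above:

#include <stdio.h>
#include <cpuid.h>

/* same range check as init_scattered_cpuid_features() */
static int cpuid_level_valid(unsigned int level)
{
        /* leaf (level & 0xffff0000) reports the max leaf of its range */
        unsigned int max_level = __get_cpuid_max(level & 0xffff0000, 0);

        return max_level >= level && max_level <= (level | 0xffff);
}

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (cpuid_level_valid(0x00000006)) {
                __cpuid(0x00000006, eax, ebx, ecx, edx);
                /* IDA is bit 1 of EAX in leaf 6, per the cpuid_bits[] table */
                printf("ida: %s\n", (eax & (1u << 1)) ? "yes" : "no");
        }
        return 0;
}
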
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
new file mode 100644 (file)
index 0000000..dcf6bbb
--- /dev/null
@@ -0,0 +1,337 @@
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/apic.h>
+
+#include "cpu.h"
+
+/*
+ *     B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
+ *     misexecution of code under Linux. Owners of such processors should
+ *     contact AMD for precise details and a CPU swap.
+ *
+ *     See     http://www.multimania.com/poulot/k6bug.html
+ *             http://www.amd.com/K6/k6docs/revgd.html
+ *
+ *     The following test is erm.. interesting. AMD neglected to up
+ *     the chip setting when fixing the bug, but they also tweaked some
+ *     performance at the same time.
+ */
+extern void vide(void);
+__asm__(".align 4\nvide: ret");
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define ENABLE_C1E_MASK         0x18000000
+#define CPUID_PROCESSOR_SIGNATURE       1
+#define CPUID_XFAM              0x0ff00000
+#define CPUID_XFAM_K8           0x00000000
+#define CPUID_XFAM_10H          0x00100000
+#define CPUID_XFAM_11H          0x00200000
+#define CPUID_XMOD              0x000f0000
+#define CPUID_XMOD_REV_F        0x00040000
+
+/* AMD systems with C1E don't have a working LAPIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+       u32 lo, hi;
+       u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+       switch (eax & CPUID_XFAM) {
+       case CPUID_XFAM_K8:
+               if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+                       break;
+       case CPUID_XFAM_10H:
+       case CPUID_XFAM_11H:
+               rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+               if (lo & ENABLE_C1E_MASK)
+                       return 1;
+               break;
+       default:
+               /* err on the side of caution */
+               return 1;
+       }
+       return 0;
+}
+#endif
+
+int force_mwait __cpuinitdata;
+
+static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+{
+       u32 l, h;
+       int mbytes = num_physpages >> (20-PAGE_SHIFT);
+       int r;
+
+#ifdef CONFIG_SMP
+       unsigned long long value;
+
+       /* Disable TLB flush filter by setting HWCR.FFDIS on K8
+        * bit 6 of msr C001_0015
+        *
+        * Errata 63 for SH-B3 steppings
+        * Errata 122 for all steppings (F+ have it disabled by default)
+        */
+       if (c->x86 == 15) {
+               rdmsrl(MSR_K7_HWCR, value);
+               value |= 1 << 6;
+               wrmsrl(MSR_K7_HWCR, value);
+       }
+#endif
+
+       /*
+        *      FIXME: We should handle the K5 here. Set up the write
+        *      range and also turn on MSR 83 bits 4 and 31 (write alloc,
+        *      no bus pipeline)
+        */
+
+       /* Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
+          3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+       
+       r = get_model_name(c);
+
+       switch (c->x86) {
+               case 4:
+               /*
+                * General Systems BIOSen alias the CPU frequency registers
+                * of the Elan at 0x000df000. Unfortunately, one of the Linux
+                * drivers subsequently pokes it and changes the CPU speed.
+                * Workaround: remove the unneeded alias.
+                */
+#define CBAR           (0xfffc) /* Configuration Base Address  (32-bit) */
+#define CBAR_ENB       (0x80000000)
+#define CBAR_KEY       (0X000000CB)
+                       if (c->x86_model == 9 || c->x86_model == 10) {
+                               if (inl(CBAR) & CBAR_ENB)
+                                       outl(0 | CBAR_KEY, CBAR);
+                       }
+                       break;
+               case 5:
+                       if (c->x86_model < 6) {
+                               /* Based on AMD doc 20734R - June 2000 */
+                               if (c->x86_model == 0) {
+                                       clear_bit(X86_FEATURE_APIC, c->x86_capability);
+                                       set_bit(X86_FEATURE_PGE, c->x86_capability);
+                               }
+                               break;
+                       }
+                       
+                       if (c->x86_model == 6 && c->x86_mask == 1) {
+                               const int K6_BUG_LOOP = 1000000;
+                               int n;
+                               void (*f_vide)(void);
+                               unsigned long d, d2;
+                               
+                               printk(KERN_INFO "AMD K6 stepping B detected - ");
+                               
+                               /*
+                                * It looks like AMD fixed the 2.6.2 bug and improved indirect 
+                                * calls at the same time.
+                                */
+
+                               n = K6_BUG_LOOP;
+                               f_vide = vide;
+                               rdtscl(d);
+                               while (n--) 
+                                       f_vide();
+                               rdtscl(d2);
+                               d = d2-d;
+
+                               if (d > 20*K6_BUG_LOOP) 
+                                       printk("system stability may be impaired when more than 32 MB are used.\n");
+                               else 
+                                       printk("probably OK (after B9730xxxx).\n");
+                               printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
+                       }
+
+                       /* K6 with old style WHCR */
+                       if (c->x86_model < 8 ||
+                          (c->x86_model == 8 && c->x86_mask < 8)) {
+                               /* We can only write-allocate on the low 508MB */
+                               if (mbytes > 508)
+                                       mbytes = 508;
+
+                               rdmsr(MSR_K6_WHCR, l, h);
+                               if ((l&0x0000FFFF)==0) {
+                                       unsigned long flags;
+                                       l=(1<<0)|((mbytes/4)<<1);
+                                       local_irq_save(flags);
+                                       wbinvd();
+                                       wrmsr(MSR_K6_WHCR, l, h);
+                                       local_irq_restore(flags);
+                                       printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+                                               mbytes);
+                               }
+                               break;
+                       }
+
+                       if ((c->x86_model == 8 && c->x86_mask >7) ||
+                            c->x86_model == 9 || c->x86_model == 13) {
+                               /* The more serious chips .. */
+
+                               if (mbytes > 4092)
+                                       mbytes = 4092;
+
+                               rdmsr(MSR_K6_WHCR, l, h);
+                               if ((l&0xFFFF0000)==0) {
+                                       unsigned long flags;
+                                       l=((mbytes>>2)<<22)|(1<<16);
+                                       local_irq_save(flags);
+                                       wbinvd();
+                                       wrmsr(MSR_K6_WHCR, l, h);
+                                       local_irq_restore(flags);
+                                       printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+                                               mbytes);
+                               }
+
+                               /*  Set MTRR capability flag if appropriate */
+                               if (c->x86_model == 13 || c->x86_model == 9 ||
+                                  (c->x86_model == 8 && c->x86_mask >= 8))
+                                       set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
+                               break;
+                       }
+
+                       if (c->x86_model == 10) {
+                               /* AMD Geode LX is model 10 */
+                               /* placeholder for any needed mods */
+                               break;
+                       }
+                       break;
+               case 6: /* An Athlon/Duron */
+                       /* Bit 15 of Athlon specific MSR 15 needs to be 0
+                        * to enable SSE on Palomino/Morgan/Barton CPUs.
+                        * If the BIOS didn't enable it already, enable it here.
+                        */
+                       if (c->x86_model >= 6 && c->x86_model <= 10) {
+                               if (!cpu_has(c, X86_FEATURE_XMM)) {
+                                       printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+                                       rdmsr(MSR_K7_HWCR, l, h);
+                                       l &= ~0x00008000;
+                                       wrmsr(MSR_K7_HWCR, l, h);
+                                       set_bit(X86_FEATURE_XMM, c->x86_capability);
+                               }
+                       }
+
+                       /* AMD has determined that Athlons since model 8 stepping 1
+                        * are more robust with CLK_CTL set to 200xxxxx instead of
+                        * 600xxxxx, as per AMD technical note 27212 0.2.
+                        */
+                       if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) {
+                               rdmsr(MSR_K7_CLK_CTL, l, h);
+                               if ((l & 0xfff00000) != 0x20000000) {
+                               printk("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
+                                               ((l & 0x000fffff)|0x20000000));
+                                       wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+                               }
+                       }
+                       break;
+       }
+
+       switch (c->x86) {
+       case 15:
+       /* Use K8 tuning for Fam10h and Fam11h */
+       case 0x10:
+       case 0x11:
+               set_bit(X86_FEATURE_K8, c->x86_capability);
+               break;
+       case 6:
+               set_bit(X86_FEATURE_K7, c->x86_capability); 
+               break;
+       }
+       if (c->x86 >= 6)
+               set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
+
+       display_cacheinfo(c);
+
+       if (cpuid_eax(0x80000000) >= 0x80000008) {
+               c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+       }
+
+       if (cpuid_eax(0x80000000) >= 0x80000007) {
+               c->x86_power = cpuid_edx(0x80000007);
+               if (c->x86_power & (1<<8))
+                       set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+       }
+
+#ifdef CONFIG_X86_HT
+       /*
+        * On an AMD multi-core setup the lower bits of the APIC id
+        * distinguish the cores.
+        */
+       if (c->x86_max_cores > 1) {
+               int cpu = smp_processor_id();
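+               /* ECX[15:12] of cpuid 0x80000008 is the number of APIC id bits reserved for cores */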
+               unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
+
+               if (bits == 0) {
+                       while ((1 << bits) < c->x86_max_cores)
+                               bits++;
+               }
+               c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
+               c->phys_proc_id >>= bits;
+               printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
+                      cpu, c->x86_max_cores, c->cpu_core_id);
+       }
+#endif
+
+       if (cpuid_eax(0x80000000) >= 0x80000006) {
+               if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
+                       num_cache_leaves = 4;
+               else
+                       num_cache_leaves = 3;
+       }
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       if (amd_apic_timer_broken())
+               local_apic_timer_disabled = 1;
+#endif
+
+       if (c->x86 == 0x10 && !force_mwait)
+               clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
+
+       /* K6s report MCEs but don't actually have all the MSRs */
+       if (c->x86 < 6)
+               clear_bit(X86_FEATURE_MCE, c->x86_capability);
+}
+
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* AMD errata T13 (order #21922) */
+       if ((c->x86 == 6)) {
+               if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
+                       size = 64;
+               if (c->x86_model == 4 &&
+                   (c->x86_mask==0 || c->x86_mask==1)) /* Tbird rev A1/A2 */
+                       size = 256;
+       }
+       return size;
+}
+
+static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+       .c_vendor       = "AMD",
+       .c_ident        = { "AuthenticAMD" },
+       .c_models = {
+               { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
+                 {
+                         [3] = "486 DX/2",
+                         [7] = "486 DX/2-WB",
+                         [8] = "486 DX/4", 
+                         [9] = "486 DX/4-WB", 
+                         [14] = "Am5x86-WT",
+                         [15] = "Am5x86-WB" 
+                 }
+               },
+       },
+       .c_init         = init_amd,
+       .c_size_cache   = amd_size_cache,
+};
+
+int __init amd_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
+       return 0;
+}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
new file mode 100644 (file)
index 0000000..59266f0
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ *  arch/i386/cpu/bugs.c
+ *
+ *  Copyright (C) 1994  Linus Torvalds
+ *
+ *  Cyrix stuff, June 1998 by:
+ *     - Rafael R. Reilova (moved everything from head.S),
+ *        <rreilova@ececs.uc.edu>
+ *     - Channing Corn (tests & fixes),
+ *     - Andrew D. Balsa (code cleanup).
+ */
+#include <linux/init.h>
+#include <linux/utsname.h>
+#include <asm/bugs.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/paravirt.h>
+#include <asm/alternative.h>
+
+static int __init no_halt(char *s)
+{
+       boot_cpu_data.hlt_works_ok = 0;
+       return 1;
+}
+
+__setup("no-hlt", no_halt);
+
+static int __init mca_pentium(char *s)
+{
+       mca_pentium_flag = 1;
+       return 1;
+}
+
+__setup("mca-pentium", mca_pentium);
+
+static int __init no_387(char *s)
+{
+       boot_cpu_data.hard_math = 0;
+       write_cr0(0xE | read_cr0());
+       return 1;
+}
+
+__setup("no387", no_387);
+
+static double __initdata x = 4195835.0;
+static double __initdata y = 3145727.0;
+
+/*
+ * This used to check for exceptions..
+ * However, it turns out that to support that,
+ * the XMM trap handlers basically had to
+ * be buggy. So let's have a correct XMM trap
+ * handler, and forget about printing out
+ * some status at boot.
+ *
+ * We should really only care about bugs here
+ * anyway. Not features.
+ */
+static void __init check_fpu(void)
+{
+       if (!boot_cpu_data.hard_math) {
+#ifndef CONFIG_MATH_EMULATION
+               printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
+               printk(KERN_EMERG "Giving up.\n");
+               for (;;) ;
+#endif
+               return;
+       }
+
+/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
+       /* Test for the divl bug.. */
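+       /* Computes (x/y)*y - x with the classic FDIV-bug operands; an exact FPU stores 0. */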
+       __asm__("fninit\n\t"
+               "fldl %1\n\t"
+               "fdivl %2\n\t"
+               "fmull %2\n\t"
+               "fldl %1\n\t"
+               "fsubp %%st,%%st(1)\n\t"
+               "fistpl %0\n\t"
+               "fwait\n\t"
+               "fninit"
+               : "=m" (*&boot_cpu_data.fdiv_bug)
+               : "m" (*&x), "m" (*&y));
+       if (boot_cpu_data.fdiv_bug)
+               printk("Hmm, FPU with FDIV bug.\n");
+}
+
+static void __init check_hlt(void)
+{
+       if (paravirt_enabled())
+               return;
+
+       printk(KERN_INFO "Checking 'hlt' instruction... ");
+       if (!boot_cpu_data.hlt_works_ok) {
+               printk("disabled\n");
+               return;
+       }
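+       /* If hlt is actually broken we never return from these; hanging here is the test. */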
+       halt();
+       halt();
+       halt();
+       halt();
+       printk("OK.\n");
+}
+
+/*
+ *     Most 386 processors have a bug where a POPAD can lock the
+ *     machine even from user space.
+ */
+
+static void __init check_popad(void)
+{
+#ifndef CONFIG_X86_POPAD_OK
+       int res, inp = (int) &res;
+
+       printk(KERN_INFO "Checking for popad bug... ");
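+       /*
+        * popa immediately followed by a memory access through %edi hits
+        * the erratum: a buggy part corrupts %eax, so res no longer reads
+        * back as 12345678.
+        */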
+       __asm__ __volatile__(
+         "movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
+         : "=&a" (res)
+         : "d" (inp)
+         : "ecx", "edi" );
+       /* If this fails, it means that any user program may lock the CPU hard. Too bad. */
+       if (res != 12345678) printk( "Buggy.\n" );
+                       else printk( "OK.\n" );
+#endif
+}
+
+/*
+ * Check whether we are able to run this kernel safely on SMP.
+ *
+ * - In order to run on an i386, we need to be compiled for i386
+ *   (due to the lack of "invlpg" and working WP on an i386)
+ * - In order to run on anything without a TSC, we need to be
+ *   compiled for an i486.
+ * - In order to support the local APIC on a buggy Pentium machine,
+ *   we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
+ *   which happens implicitly if compiled for a Pentium or lower
+ *   (unless an advanced selection of CPU features is used), since
+ *   any other config implies a properly working local APIC without
+ *   the need to do extra reads from the APIC.
+ */
+
+static void __init check_config(void)
+{
+/*
+ * We'd better not be an i386 if we're configured to use some
+ * i486+ only features! (WP works in supervisor mode and the
+ * new "invlpg" and "bswap" instructions)
+ */
+#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
+       if (boot_cpu_data.x86 == 3)
+               panic("Kernel requires i486+ for 'invlpg' and other features");
+#endif
+
+/*
+ * If we configured ourselves for a TSC, we'd better have one!
+ */
+#ifdef CONFIG_X86_TSC
+       if (!cpu_has_tsc && !tsc_disable)
+               panic("Kernel compiled for Pentium+, requires TSC feature!");
+#endif
+
+/*
+ * If we were told we had a good local APIC, check for buggy Pentia,
+ * i.e. all B steppings and the C2 stepping of P54C when using their
+ * integrated APIC (see 11AP erratum in "Pentium Processor
+ * Specification Update").
+ */
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
+           && cpu_has_apic
+           && boot_cpu_data.x86 == 5
+           && boot_cpu_data.x86_model == 2
+           && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
+               panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
+#endif
+}
+
+
+void __init check_bugs(void)
+{
+       identify_boot_cpu();
+#ifndef CONFIG_SMP
+       printk("CPU: ");
+       print_cpu_info(&boot_cpu_data);
+#endif
+       check_config();
+       check_fpu();
+       check_hlt();
+       check_popad();
+       init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+       alternative_instructions();
+}
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
new file mode 100644 (file)
index 0000000..473eac8
--- /dev/null
@@ -0,0 +1,471 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/e820.h>
+#include <asm/mtrr.h>
+#include "cpu.h"
+
+#ifdef CONFIG_X86_OOSTORE
+
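+/* Round x down to the largest power of two <= x; MCR sizes must be powers of two. */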
+static u32 __cpuinit power2(u32 x)
+{
+       u32 s=1;
+       while(s<=x)
+               s<<=1;
+       return s>>=1;
+}
+
+
+/*
+ *     Set up an actual MCR
+ */
+static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+{
+       u32 lo, hi;
+       
+       hi = base & ~0xFFF;
+       lo = ~(size-1);         /* Size is a power of 2 so this makes a mask */
+       lo &= ~0xFFF;           /* Remove the ctrl value bits */
+       lo |= key;              /* Attribute we wish to set */
+       wrmsr(reg+MSR_IDT_MCR0, lo, hi);
+       mtrr_centaur_report_mcr(reg, lo, hi);   /* Tell the mtrr driver */
+}
+
+/*
+ *     Figure what we can cover with MCR's
+ *
+ *     Shortcut: We know you can't put 4Gig of RAM on a winchip
+ */
+
+static u32 __cpuinit ramtop(void)              /* 16388 */
+{
+       int i;
+       u32 top = 0;
+       u32 clip = 0xFFFFFFFFUL;
+       
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long start, end;
+
+               if (e820.map[i].addr > 0xFFFFFFFFUL)
+                       continue;
+               /*
+                *      Don't MCR over reserved space. Ignore the ISA hole;
+                *      we frob around that catastrophe already.
+                */
+
+               if (e820.map[i].type == E820_RESERVED)
+               {
+                       if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
+                               clip = e820.map[i].addr;
+                       continue;
+               }
+               start = e820.map[i].addr;
+               end = e820.map[i].addr + e820.map[i].size;
+               if (start >= end)
+                       continue;
+               if (end > top)
+                       top = end;
+       }
+       /* Everything below 'top' should be RAM except for the ISA hole.
+          Because of the limited number of MCRs we want to map NV/ACPI
+          into our MCR range for gunk in RAM.
+
+          Clip might cause us to MCR insufficient RAM, but that is an
+          acceptable failure mode and should only bite obscure boxes with
+          a VESA hole at 15Mb.
+
+          The second case where Clip kicks in is when the EBDA is marked
+          as reserved. Again we fail safe with reasonable results.
+       */
+       
+       if(top>clip)
+               top=clip;
+               
+       return top;
+}
+
+/*
+ *     Compute a set of MCR's to give maximum coverage
+ */
+
+static int __cpuinit centaur_mcr_compute(int nr, int key)
+{
+       u32 mem = ramtop();
+       u32 root = power2(mem);
+       u32 base = root;
+       u32 top = root;
+       u32 floor = 0;
+       int ct = 0;
+       
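+       /*
+        *      Greedy coverage: each pass grabs the largest power-of-two
+        *      block available, growing up from 'top', down from 'base',
+        *      or filling below the ISA hole, whichever covers most.
+        */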
+       while(ct<nr)
+       {
+               u32 fspace = 0;
+
+               /*
+                *      Find the largest block we will fill going upwards
+                */
+
+               u32 high = power2(mem-top);     
+
+               /*
+                *      Find the largest block we will fill going downwards
+                */
+
+               u32 low = base/2;
+
+               /*
+                *      Don't fill below 1Mb going downwards as there
+                *      is an ISA hole in the way.
+                */             
+                
+               if(base <= 1024*1024)
+                       low = 0;
+                       
+               /*
+                *      See how much space we could cover by filling below
+                *      the ISA hole
+                */
+                
+               if(floor == 0)
+                       fspace = 512*1024;
+               else if(floor ==512*1024)
+                       fspace = 128*1024;
+
+               /* And forget ROM space */
+               
+               /*
+                *      Now install the largest coverage we get
+                */
+                
+               if(fspace > high && fspace > low)
+               {
+                       centaur_mcr_insert(ct, floor, fspace, key);
+                       floor += fspace;
+               }
+               else if(high > low)
+               {
+                       centaur_mcr_insert(ct, top, high, key);
+                       top += high;
+               }
+               else if(low > 0)
+               {
+                       base -= low;
+                       centaur_mcr_insert(ct, base, low, key);
+               }
+               else break;
+               ct++;
+       }
+       /*
+        *      We loaded ct values. We now need to set the mask. The caller
+        *      must do this bit.
+        */
+        
+       return ct;
+}
+
+static void __cpuinit centaur_create_optimal_mcr(void)
+{
+       int i;
+       /*
+        *      Allocate up to 6 mcrs to mark as much of ram as possible
+        *      as write combining and weak write ordered.
+        *
+        *      To experiment with: Linux never uses stack operations for 
+        *      mmio spaces so we could globally enable stack operation wc
+        *
+        *      Load the registers with type 31 - full write combining, all
+        *      writes weakly ordered.
+        */
+       int used = centaur_mcr_compute(6, 31);
+
+       /*
+        *      Wipe unused MCRs
+        */
+        
+       for(i=used;i<8;i++)
+               wrmsr(MSR_IDT_MCR0+i, 0, 0);
+}
+
+static void __cpuinit winchip2_create_optimal_mcr(void)
+{
+       u32 lo, hi;
+       int i;
+
+       /*
+        *      Allocate up to 6 mcrs to mark as much of ram as possible
+        *      as write combining, weak store ordered.
+        *
+        *      Load the registers with type 25
+        *              8       -       weak write ordering
+        *              16      -       weak read ordering
+        *              1       -       write combining
+        */
+
+       int used = centaur_mcr_compute(6, 25);
+       
+       /*
+        *      Mark the registers we are using.
+        */
+        
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       for(i=0;i<used;i++)
+               lo|=1<<(9+i);
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       
+       /*
+        *      Wipe unused MCRs
+        */
+        
+       for(i=used;i<8;i++)
+               wrmsr(MSR_IDT_MCR0+i, 0, 0);
+}
+
+/*
+ *     Handle the MCR key on the Winchip 2.
+ */
+
+static void __cpuinit winchip2_unprotect_mcr(void)
+{
+       u32 lo, hi;
+       u32 key;
+       
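+       /* The MCR unlock key lives in bits 19-17; echoing it into bits 8-6 unlocks. */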
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       lo&=~0x1C0;     /* blank bits 8-6 */
+       key = (lo>>17) & 7;
+       lo |= key<<6;   /* replace with unlock key */
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+}
+
+static void __cpuinit winchip2_protect_mcr(void)
+{
+       u32 lo, hi;
+       
+       rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+       lo&=~0x1C0;     /* blank bits 8-6 */
+       wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+}
+#endif /* CONFIG_X86_OOSTORE */
+
+#define ACE_PRESENT    (1 << 6)
+#define ACE_ENABLED    (1 << 7)
+#define ACE_FCR                (1 << 28)       /* MSR_VIA_FCR */
+
+#define RNG_PRESENT    (1 << 2)
+#define RNG_ENABLED    (1 << 3)
+#define RNG_ENABLE     (1 << 6)        /* MSR_VIA_RNG */
+
+static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+{
+       u32  lo, hi;
+
+       /* Test for Centaur Extended Feature Flags presence */
+       if (cpuid_eax(0xC0000000) >= 0xC0000001) {
+               u32 tmp = cpuid_edx(0xC0000001);
+
+               /* enable ACE unit, if present and disabled */
+               if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
+                       rdmsr (MSR_VIA_FCR, lo, hi);
+                       lo |= ACE_FCR;          /* enable ACE unit */
+                       wrmsr (MSR_VIA_FCR, lo, hi);
+                       printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+               }
+
+               /* enable RNG unit, if present and disabled */
+               if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
+                       rdmsr (MSR_VIA_RNG, lo, hi);
+                       lo |= RNG_ENABLE;       /* enable RNG unit */
+                       wrmsr (MSR_VIA_RNG, lo, hi);
+                       printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+               }
+
+               /* store Centaur Extended Feature Flags as
+                * word 5 of the CPU capability bit array
+                */
+               c->x86_capability[5] = cpuid_edx(0xC0000001);
+       }
+
+       /* Cyrix III family needs CX8 & PGE explicitly enabled. */
+       if (c->x86_model >=6 && c->x86_model <= 9) {
+               rdmsr (MSR_VIA_FCR, lo, hi);
+               lo |= (1<<1 | 1<<7);
+               wrmsr (MSR_VIA_FCR, lo, hi);
+               set_bit(X86_FEATURE_CX8, c->x86_capability);
+       }
+
+       /* Before Nehemiah, the C3's had 3dNOW! */
+       if (c->x86_model >=6 && c->x86_model <9)
+               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+
+       get_model_name(c);
+       display_cacheinfo(c);
+}
+
+static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+{
+       enum {
+               ECX8=1<<1,
+               EIERRINT=1<<2,
+               DPM=1<<3,
+               DMCE=1<<4,
+               DSTPCLK=1<<5,
+               ELINEAR=1<<6,
+               DSMC=1<<7,
+               DTLOCK=1<<8,
+               EDCTLB=1<<8,
+               EMMX=1<<9,
+               DPDC=1<<11,
+               EBRPRED=1<<12,
+               DIC=1<<13,
+               DDC=1<<14,
+               DNA=1<<15,
+               ERETSTK=1<<16,
+               E2MMX=1<<19,
+               EAMD3D=1<<20,
+       };
+
+       char *name;
+       u32  fcr_set=0;
+       u32  fcr_clr=0;
+       u32  lo,hi,newlo;
+       u32  aa,bb,cc,dd;
+
+       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+
+       switch (c->x86) {
+
+               case 5:
+                       switch(c->x86_model) {
+                       case 4:
+                               name="C6";
+                               fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
+                               fcr_clr=DPDC;
+                               printk(KERN_NOTICE "Disabling bugged TSC.\n");
+                               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+#ifdef CONFIG_X86_OOSTORE
+                               centaur_create_optimal_mcr();
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                                       
+                                  The C6 original lacks weak read order 
+                                  
+                                  Note 0x120 is write only on Winchip 1 */
+                                  
+                               wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
+#endif                         
+                               break;
+                       case 8:
+                               switch(c->x86_mask) {
+                               default:
+                                       name="2";
+                                       break;
+                               case 7 ... 9:
+                                       name="2A";
+                                       break;
+                               case 10 ... 15:
+                                       name="2B";
+                                       break;
+                               }
+                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+                               fcr_clr=DPDC;
+#ifdef CONFIG_X86_OOSTORE
+                               winchip2_unprotect_mcr();
+                               winchip2_create_optimal_mcr();
+                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                               */
+                               lo|=31;                         
+                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               winchip2_protect_mcr();
+#endif
+                               break;
+                       case 9:
+                               name="3";
+                               fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+                               fcr_clr=DPDC;
+#ifdef CONFIG_X86_OOSTORE
+                               winchip2_unprotect_mcr();
+                               winchip2_create_optimal_mcr();
+                               rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               /* Enable
+                                       write combining on non-stack, non-string
+                                       write combining on string, all types
+                                       weak write ordering 
+                               */
+                               lo|=31;                         
+                               wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
+                               winchip2_protect_mcr();
+#endif
+                               break;
+                       default:
+                               name="??";
+                       }
+
+                       rdmsr(MSR_IDT_FCR1, lo, hi);
+                       newlo=(lo|fcr_set) & (~fcr_clr);
+
+                       if (newlo!=lo) {
+                               printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
+                               wrmsr(MSR_IDT_FCR1, newlo, hi );
+                       } else {
+                               printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
+                       }
+                       /* Emulate MTRRs using Centaur's MCR. */
+                       set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
+                       /* Report CX8 */
+                       set_bit(X86_FEATURE_CX8, c->x86_capability);
+                       /* Set 3DNow! on Winchip 2 and above. */
+                       if (c->x86_model >=8)
+                               set_bit(X86_FEATURE_3DNOW, c->x86_capability);
+                       /* See if we can find out some more. */
+                       if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
+                               /* Yes, we can. */
+                               cpuid(0x80000005,&aa,&bb,&cc,&dd);
+                               /* Add L1 data and code cache sizes. */
+                               c->x86_cache_size = (cc>>24)+(dd>>24);
+                       }
+                       sprintf( c->x86_model_id, "WinChip %s", name );
+                       break;
+
+               case 6:
+                       init_c3(c);
+                       break;
+       }
+}
+
+static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* VIA C3 CPUs (670-68F) need further shifting. */
+       if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
+               size >>= 8;
+
+       /* VIA also screwed up Nehemiah stepping 1, and made
+          it return '65KB' instead of '64KB'
+          - Note, it seems this may only be in engineering samples. */
+       if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
+               size -= 1;
+
+       return size;
+}
+
+static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Centaur",
+       .c_ident        = { "CentaurHauls" },
+       .c_init         = init_centaur,
+       .c_size_cache   = centaur_size_cache,
+};
+
+int __init centaur_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
+       return 0;
+}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
new file mode 100644 (file)
index 0000000..d506201
--- /dev/null
@@ -0,0 +1,733 @@
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/bootmem.h>
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/mtrr.h>
+#include <asm/mce.h>
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+#endif
+
+#include "cpu.h"
+
+DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+       [GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
+       [GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
+       [GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
+       [GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
+       /*
+        * Segments used for calling PnP BIOS have byte granularity.
+        * The code segments and data segments have fixed 64k limits,
+        * the transfer segment sizes are set at run time.
+        */
+       [GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
+       [GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
+       [GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
+       [GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
+       [GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
+       /*
+        * The APM segments have byte granularity and their bases
+        * are set at run time.  All have 64k limits.
+        */
+       [GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
+       /* 16-bit code */
+       [GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
+       [GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
+
+       [GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
+       [GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
+} };
+EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+
+static int cachesize_override __cpuinitdata = -1;
+static int disable_x86_fxsr __cpuinitdata;
+static int disable_x86_serial_nr __cpuinitdata = 1;
+static int disable_x86_sep __cpuinitdata;
+
+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+extern int disable_pse;
+
+static void __cpuinit default_init(struct cpuinfo_x86 * c)
+{
+       /* Not much we can do here... */
+       /* Check if at least it has cpuid */
+       if (c->cpuid_level == -1) {
+               /* No cpuid. It must be an ancient CPU */
+               if (c->x86 == 4)
+                       strcpy(c->x86_model_id, "486");
+               else if (c->x86 == 3)
+                       strcpy(c->x86_model_id, "386");
+       }
+}
+
+static struct cpu_dev __cpuinitdata default_cpu = {
+       .c_init = default_init,
+       .c_vendor = "Unknown",
+};
+static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
+
+static int __init cachesize_setup(char *str)
+{
+       get_option (&str, &cachesize_override);
+       return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
+{
+       unsigned int *v;
+       char *p, *q;
+
+       if (cpuid_eax(0x80000000) < 0x80000004)
+               return 0;
+
+       v = (unsigned int *) c->x86_model_id;
+       cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+       cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+       cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+       c->x86_model_id[48] = 0;
+
+       /* Intel chips right-justify this string for some dumb reason;
+          undo that brain damage */
+       p = q = &c->x86_model_id[0];
+       while ( *p == ' ' )
+            p++;
+       if ( p != q ) {
+            while ( *p )
+                 *q++ = *p++;
+            while ( q <= &c->x86_model_id[48] )
+                 *q++ = '\0';  /* Zero-pad the rest */
+       }
+
+       return 1;
+}
+
+
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int n, dummy, ecx, edx, l2size;
+
+       n = cpuid_eax(0x80000000);
+
+       if (n >= 0x80000005) {
+               cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+               c->x86_cache_size=(ecx>>24)+(edx>>24);  
+       }
+
+       if (n < 0x80000006)     /* Some chips just have a large L1. */
+               return;
+
+       ecx = cpuid_ecx(0x80000006);
+       l2size = ecx >> 16;
+       
+       /* do processor-specific cache resizing */
+       if (this_cpu->c_size_cache)
+               l2size = this_cpu->c_size_cache(c,l2size);
+
+       /* Allow user to override all this if necessary. */
+       if (cachesize_override != -1)
+               l2size = cachesize_override;
+
+       if ( l2size == 0 )
+               return;         /* Again, no L2 cache is possible */
+
+       c->x86_cache_size = l2size;
+
+       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+              l2size, ecx & 0xFF);
+}
+
+/* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is only used if init_<vendor>() below doesn't set the model name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+       struct cpu_model_info *info;
+
+       if ( c->x86_model >= 16 )
+               return NULL;    /* Range check */
+
+       if (!this_cpu)
+               return NULL;
+
+       info = this_cpu->c_models;
+
+       while (info && info->family) {
+               if (info->family == c->x86)
+                       return info->model_names[c->x86_model];
+               info++;
+       }
+       return NULL;            /* Not found */
+}
+
+
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+{
+       char *v = c->x86_vendor_id;
+       int i;
+       static int printed;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               if (cpu_devs[i]) {
+                       if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+                           (cpu_devs[i]->c_ident[1] && 
+                            !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+                               c->x86_vendor = i;
+                               if (!early)
+                                       this_cpu = cpu_devs[i];
+                               return;
+                       }
+               }
+       }
+       if (!printed) {
+               printed++;
+               printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+               printk(KERN_ERR "CPU: Your system may be unstable.\n");
+       }
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       this_cpu = &default_cpu;
+}
+
+
+static int __init x86_fxsr_setup(char * s)
+{
+       /* Tell all the other CPU's to not use it... */
+       disable_x86_fxsr = 1;
+
+       /*
+        * ... and clear the bits early in the boot_cpu_data
+        * so that the bootup process doesn't try to do this
+        * either.
+        */
+       clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
+       clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
+       return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+
+static int __init x86_sep_setup(char * s)
+{
+       disable_x86_sep = 1;
+       return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+       u32 f1, f2;
+
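+       /*
+        * Push EFLAGS, flip the requested bit, pop it back into EFLAGS,
+        * then read EFLAGS again: if the bit stuck, the CPU knows it.
+        */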
+       asm("pushfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "movl %0,%1\n\t"
+           "xorl %2,%0\n\t"
+           "pushl %0\n\t"
+           "popfl\n\t"
+           "pushfl\n\t"
+           "popl %0\n\t"
+           "popfl\n\t"
+           : "=&r" (f1), "=&r" (f2)
+           : "ir" (flag));
+
+       return ((f1^f2) & flag) != 0;
+}
+
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+       return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+void __init cpu_detect(struct cpuinfo_x86 *c)
+{
+       /* Get vendor name */
+       cpuid(0x00000000, &c->cpuid_level,
+             (int *)&c->x86_vendor_id[0],
+             (int *)&c->x86_vendor_id[8],
+             (int *)&c->x86_vendor_id[4]);
+
+       c->x86 = 4;
+       if (c->cpuid_level >= 0x00000001) {
+               u32 junk, tfms, cap0, misc;
+               cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
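+               /* EAX layout: stepping[3:0], model[7:4], family[11:8], ext. model[19:16], ext. family[27:20] */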
+               c->x86 = (tfms >> 8) & 15;
+               c->x86_model = (tfms >> 4) & 15;
+               if (c->x86 == 0xf)
+                       c->x86 += (tfms >> 20) & 0xff;
+               if (c->x86 >= 0x6)
+                       c->x86_model += ((tfms >> 16) & 0xF) << 4;
+               c->x86_mask = tfms & 15;
+               if (cap0 & (1<<19))
+                       c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+       }
+}
+
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects.
+
+   WARNING: this function is only called on the BP.  Don't add code here
+   that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       c->x86_cache_alignment = 32;
+
+       if (!have_cpuid_p())
+               return;
+
+       cpu_detect(c);
+
+       get_cpu_vendor(c, 1);
+}
+
+static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+{
+       u32 tfms, xlvl;
+       int ebx;
+
+       if (have_cpuid_p()) {
+               /* Get vendor name */
+               cpuid(0x00000000, &c->cpuid_level,
+                     (int *)&c->x86_vendor_id[0],
+                     (int *)&c->x86_vendor_id[8],
+                     (int *)&c->x86_vendor_id[4]);
+               
+               get_cpu_vendor(c, 0);
+               /* Initialize the standard set of capabilities */
+               /* Note that the vendor-specific code below might override */
+       
+               /* Intel-defined flags: level 0x00000001 */
+               if ( c->cpuid_level >= 0x00000001 ) {
+                       u32 capability, excap;
+                       cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+                       c->x86_capability[0] = capability;
+                       c->x86_capability[4] = excap;
+                       c->x86 = (tfms >> 8) & 15;
+                       c->x86_model = (tfms >> 4) & 15;
+                       if (c->x86 == 0xf)
+                               c->x86 += (tfms >> 20) & 0xff;
+                       if (c->x86 >= 0x6)
+                               c->x86_model += ((tfms >> 16) & 0xF) << 4;
+                       c->x86_mask = tfms & 15;
+#ifdef CONFIG_X86_HT
+                       c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+#else
+                       c->apicid = (ebx >> 24) & 0xFF;
+#endif
+                       if (c->x86_capability[0] & (1<<19))
+                               c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
+               } else {
+                       /* Have CPUID level 0 only - unheard of */
+                       c->x86 = 4;
+               }
+
+               /* AMD-defined flags: level 0x80000001 */
+               xlvl = cpuid_eax(0x80000000);
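+               /* A sane extended range echoes 0x8000xxxx in EAX; anything else means no extended CPUID. */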
+               if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+                       if ( xlvl >= 0x80000001 ) {
+                               c->x86_capability[1] = cpuid_edx(0x80000001);
+                               c->x86_capability[6] = cpuid_ecx(0x80000001);
+                       }
+                       if ( xlvl >= 0x80000004 )
+                               get_model_name(c); /* Default name */
+               }
+
+               init_scattered_cpuid_features(c);
+       }
+
+       early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
+       c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+               /* Disable processor serial number */
+               unsigned long lo,hi;
+               rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               lo |= 0x200000;
+               wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+               printk(KERN_NOTICE "CPU serial number disabled.\n");
+               clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+               /* Disabling the serial number may affect the cpuid level */
+               c->cpuid_level = cpuid_eax(0);
+       }
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+       disable_x86_serial_nr = 0;
+       return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+
+
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+{
+       int i;
+
+       c->loops_per_jiffy = loops_per_jiffy;
+       c->x86_cache_size = -1;
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
+       c->cpuid_level = -1;    /* CPUID not detected */
+       c->x86_model = c->x86_mask = 0; /* So far unknown... */
+       c->x86_vendor_id[0] = '\0'; /* Unset */
+       c->x86_model_id[0] = '\0';  /* Unset */
+       c->x86_max_cores = 1;
+       c->x86_clflush_size = 32;
+       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+       if (!have_cpuid_p()) {
+               /* First of all, decide if this is a 486 or higher */
+               /* It's a 486 if we can modify the AC flag */
+               if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+                       c->x86 = 4;
+               else
+                       c->x86 = 3;
+       }
+
+       generic_identify(c);
+
+       printk(KERN_DEBUG "CPU: After generic identify, caps:");
+       for (i = 0; i < NCAPINTS; i++)
+               printk(" %08lx", c->x86_capability[i]);
+       printk("\n");
+
+       if (this_cpu->c_identify) {
+               this_cpu->c_identify(c);
+
+               printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+               for (i = 0; i < NCAPINTS; i++)
+                       printk(" %08lx", c->x86_capability[i]);
+               printk("\n");
+       }
+
+       /*
+        * Vendor-specific initialization.  In this section we
+        * canonicalize the feature flags, meaning if there are
+        * features a certain CPU supports which CPUID doesn't
+        * tell us, CPUID claiming incorrect flags, or other bugs,
+        * we handle them here.
+        *
+        * At the end of this section, c->x86_capability better
+        * indicate the features this CPU genuinely supports!
+        */
+       if (this_cpu->c_init)
+               this_cpu->c_init(c);
+
+       /* Disable the PN if appropriate */
+       squash_the_stupid_serial_number(c);
+
+       /*
+        * The vendor-specific functions might have changed features.  Now
+        * we do "generic changes."
+        */
+
+       /* TSC disabled? */
+       if ( tsc_disable )
+               clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+       /* FXSR disabled? */
+       if (disable_x86_fxsr) {
+               clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+               clear_bit(X86_FEATURE_XMM, c->x86_capability);
+       }
+
+       /* SEP disabled? */
+       if (disable_x86_sep)
+               clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+       if (disable_pse)
+               clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+       /* If the model name is still unset, do table lookup. */
+       if ( !c->x86_model_id[0] ) {
+               char *p;
+               p = table_lookup_model(c);
+               if ( p )
+                       strcpy(c->x86_model_id, p);
+               else
+                       /* Last resort... */
+                       sprintf(c->x86_model_id, "%02x/%02x",
+                               c->x86, c->x86_model);
+       }
+
+       /* Now the feature flags better reflect actual CPU features! */
+
+       printk(KERN_DEBUG "CPU: After all inits, caps:");
+       for (i = 0; i < NCAPINTS; i++)
+               printk(" %08lx", c->x86_capability[i]);
+       printk("\n");
+
+       /*
+        * On SMP, boot_cpu_data holds the common feature set between
+        * all CPUs; so make sure that we indicate which features are
+        * common between the CPUs.  The first time this routine gets
+        * executed, c == &boot_cpu_data.
+        */
+       if ( c != &boot_cpu_data ) {
+               /* AND the already accumulated flags with these */
+               for ( i = 0 ; i < NCAPINTS ; i++ )
+                       boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+       }
+
+       /* Init Machine Check Exception if available. */
+       mcheck_init(c);
+}
+
+void __init identify_boot_cpu(void)
+{
+       identify_cpu(&boot_cpu_data);
+       sysenter_setup();
+       enable_sep_cpu();
+       mtrr_bp_init();
+}
+
+void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+{
+       BUG_ON(c == &boot_cpu_data);
+       identify_cpu(c);
+       enable_sep_cpu();
+       mtrr_ap_init();
+}
+
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+{
+       u32     eax, ebx, ecx, edx;
+       int     index_msb, core_bits;
+
+       cpuid(1, &eax, &ebx, &ecx, &edx);
+
+       if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+               return;
+
+       smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+       if (smp_num_siblings == 1) {
+               printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
+       } else if (smp_num_siblings > 1 ) {
+
+               if (smp_num_siblings > NR_CPUS) {
+                       printk(KERN_WARNING "CPU: Unsupported number of "
+                                       "siblings %d", smp_num_siblings);
+                       smp_num_siblings = 1;
+                       return;
+               }
+
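+               /*
+                * EBX[31:24] of cpuid leaf 1 is the initial APIC id; the
+                * upper bits select the package, the lower bits the core
+                * and thread within it.
+                */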
+               index_msb = get_count_order(smp_num_siblings);
+               c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+               printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+                      c->phys_proc_id);
+
+               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+               index_msb = get_count_order(smp_num_siblings);
+
+               core_bits = get_count_order(c->x86_max_cores);
+
+               c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+                                              ((1 << core_bits) - 1);
+
+               if (c->x86_max_cores > 1)
+                       printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+                              c->cpu_core_id);
+       }
+}
+#endif
+
+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+{
+       char *vendor = NULL;
+
+       if (c->x86_vendor < X86_VENDOR_NUM)
+               vendor = this_cpu->c_vendor;
+       else if (c->cpuid_level >= 0)
+               vendor = c->x86_vendor_id;
+
+       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+               printk("%s ", vendor);
+
+       if (!c->x86_model_id[0])
+               printk("%d86", c->x86);
+       else
+               printk("%s", c->x86_model_id);
+
+       if (c->x86_mask || c->cpuid_level >= 0) 
+               printk(" stepping %02x\n", c->x86_mask);
+       else
+               printk("\n");
+}
+
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
+
+/* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+extern int intel_cpu_init(void);
+extern int cyrix_init_cpu(void);
+extern int nsc_init_cpu(void);
+extern int amd_init_cpu(void);
+extern int centaur_init_cpu(void);
+extern int transmeta_init_cpu(void);
+extern int nexgen_init_cpu(void);
+extern int umc_init_cpu(void);
+
+void __init early_cpu_init(void)
+{
+       intel_cpu_init();
+       cyrix_init_cpu();
+       nsc_init_cpu();
+       amd_init_cpu();
+       centaur_init_cpu();
+       transmeta_init_cpu();
+       nexgen_init_cpu();
+       umc_init_cpu();
+       early_cpu_detect();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /* pse is not compatible with on-the-fly unmapping,
+        * disable it even if the cpus claim to support it.
+        */
+       clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+       disable_pse = 1;
+#endif
+}
+
+/* Make sure %fs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+{
+       memset(regs, 0, sizeof(struct pt_regs));
+       regs->xfs = __KERNEL_PERCPU;
+       return regs;
+}
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+       struct Xgt_desc_struct gdt_descr;
+
+       gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+       gdt_descr.size = GDT_SIZE - 1;
+       load_gdt(&gdt_descr);
+       asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+       int cpu = smp_processor_id();
+       struct task_struct *curr = current;
+       struct tss_struct * t = &per_cpu(init_tss, cpu);
+       struct thread_struct *thread = &curr->thread;
+
+       if (cpu_test_and_set(cpu, cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+               for (;;) local_irq_enable();
+       }
+
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+       if (tsc_disable && cpu_has_tsc) {
+               printk(KERN_NOTICE "Disabling TSC...\n");
+               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+               set_in_cr4(X86_CR4_TSD);
+       }
+
+       load_idt(&idt_descr);
+       switch_to_new_gdt();
+
+       /*
+        * Set up and load the per-CPU TSS and LDT
+        */
+       atomic_inc(&init_mm.mm_count);
+       curr->active_mm = &init_mm;
+       if (curr->mm)
+               BUG();
+       enter_lazy_tlb(&init_mm, curr);
+
+       load_esp0(t, thread);
+       set_tss_desc(cpu,t);
+       load_TR_desc();
+       load_LDT(&init_mm.context);
+
+#ifdef CONFIG_DOUBLEFAULT
+       /* Set up doublefault TSS pointer in the GDT */
+       __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+#endif
+
+       /* Clear %gs. */
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+
+       /* Clear all 6 debug registers: */
+       set_debugreg(0, 0);
+       set_debugreg(0, 1);
+       set_debugreg(0, 2);
+       set_debugreg(0, 3);
+       set_debugreg(0, 6);
+       set_debugreg(0, 7);
+
+       /*
+        * Force FPU initialization:
+        */
+       current_thread_info()->status = 0;
+       clear_used_math();
+       mxcsr_feature_mask_init();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __cpuinit cpu_uninit(void)
+{
+       int cpu = raw_smp_processor_id();
+       cpu_clear(cpu, cpu_initialized);
+
+       /* lazy TLB state */
+       per_cpu(cpu_tlbstate, cpu).state = 0;
+       per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+}
+#endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
new file mode 100644 (file)
index 0000000..2f6432c
--- /dev/null
@@ -0,0 +1,28 @@
+
+struct cpu_model_info {
+       int vendor;
+       int family;
+       char *model_names[16];
+};
+
+/* attempt to consolidate cpu attributes */
+struct cpu_dev {
+       char    * c_vendor;
+
+       /* some have two possibilities for cpuid string */
+       char    * c_ident[2];   
+
+       struct          cpu_model_info c_models[4];
+
+       void            (*c_init)(struct cpuinfo_x86 * c);
+       void            (*c_identify)(struct cpuinfo_x86 * c);
+       unsigned int    (*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
+};
+
+extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+
+extern int get_model_name(struct cpuinfo_x86 *c);
+extern void display_cacheinfo(struct cpuinfo_x86 *c);
+
+extern void early_intel_workaround(struct cpuinfo_x86 *c);
+
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
new file mode 100644 (file)
index 0000000..122d2d7
--- /dev/null
@@ -0,0 +1,463 @@
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/processor-cyrix.h>
+#include <asm/timer.h>
+#include <asm/pci-direct.h>
+#include <asm/tsc.h>
+
+#include "cpu.h"
+
+/*
+ * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
+ */
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+       unsigned char ccr2, ccr3;
+       unsigned long flags;
+       
+       /* we test for DEVID by checking whether CCR3 is writable */
+       local_irq_save(flags);
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, ccr3 ^ 0x80);
+       getCx86(0xc0);   /* dummy to change bus */
+
+       if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
+               ccr2 = getCx86(CX86_CCR2);
+               setCx86(CX86_CCR2, ccr2 ^ 0x04);
+               getCx86(0xc0);  /* dummy */
+
+               if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
+                       *dir0 = 0xfd;
+               else {                          /* Cx486S A step */
+                       setCx86(CX86_CCR2, ccr2);
+                       *dir0 = 0xfe;
+               }
+       }
+       else {
+               setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */
+
+               /* read DIR0 and DIR1 CPU registers */
+               *dir0 = getCx86(CX86_DIR0);
+               *dir1 = getCx86(CX86_DIR1);
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
+ * order to identify the Cyrix CPU model after we're out of setup.c
+ *
+ * Actually, since bugs.h doesn't even reference this, perhaps someone
+ * should fix the documentation???
+ */
+static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
+
+static char Cx86_model[][9] __cpuinitdata = {
+       "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
+       "M II ", "Unknown"
+};
+static char Cx486_name[][5] __cpuinitdata = {
+       "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
+       "SRx2", "DRx2"
+};
+static char Cx486S_name[][4] __cpuinitdata = {
+       "S", "S2", "Se", "S2e"
+};
+static char Cx486D_name[][4] __cpuinitdata = {
+       "DX", "DX2", "?", "?", "?", "DX4"
+};
+static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
+static char cyrix_model_mult1[] __cpuinitdata = "12??43";
+static char cyrix_model_mult2[] __cpuinitdata = "12233445";
+
+/*
+ * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
+ * BIOSes for compatibility with DOS games.  This makes the udelay loop
+ * work correctly, and improves performance.
+ *
+ * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
+ */
+
+extern void calibrate_delay(void) __init;
+
+static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
+{
+       unsigned long flags;
+       
+       if (Cx86_dir0_msb == 3) {
+               unsigned char ccr3, ccr5;
+
+               local_irq_save(flags);
+               ccr3 = getCx86(CX86_CCR3);
+               setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+               ccr5 = getCx86(CX86_CCR5);
+               if (ccr5 & 2)
+                       setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
+               setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
+               local_irq_restore(flags);
+
+               if (ccr5 & 2) { /* possible wrong calibration done */
+                       printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+                       calibrate_delay();
+                       c->loops_per_jiffy = loops_per_jiffy;
+               }
+       }
+}
+
+
+static void __cpuinit set_cx86_reorder(void)
+{
+       u8 ccr3;
+
+       printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+
+       /* Load/Store Serialize to mem access disable (=reorder it)  */
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+       /* set load/store serialize from 1GB to 4GB */
+       ccr3 |= 0xe0;
+       setCx86(CX86_CCR3, ccr3);
+}
+
+static void __cpuinit set_cx86_memwb(void)
+{
+       u32 cr0;
+
+       printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+
+       /* CCR2 bit 2: unlock NW bit */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+       /* set 'Not Write-through' (CR0 bit 29, NW) */
+       cr0 = 0x20000000;
+       write_cr0(read_cr0() | cr0);
+       /* CCR2 bit 2: lock NW bit and set WT1 */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
+}
+
+static void __cpuinit set_cx86_inc(void)
+{
+       unsigned char ccr3;
+
+       printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
+
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+       /* PCR1 -- Performance Control */
+       /* Incrementor on, whatever that is */
+       setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
+       /* PCR0 -- Performance Control */
+       /* Incrementor Margin 10 */
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); 
+       setCx86(CX86_CCR3, ccr3);       /* disable MAPEN */
+}
+
+/*
+ *     Configure later MediaGX and/or Geode processor.
+ */
+
+static void __cpuinit geode_configure(void)
+{
+       unsigned long flags;
+       u8 ccr3;
+       local_irq_save(flags);
+
+       /* Suspend on halt power saving and enable #SUSP pin */
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+
+       ccr3 = getCx86(CX86_CCR3);
+       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN */
+       
+
+       /* FPU fast, DTE cache, Mem bypass */
+       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+       setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
+       
+       set_cx86_memwb();
+       set_cx86_reorder();     
+       set_cx86_inc();
+       
+       local_irq_restore(flags);
+}
+
+
+static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
+{
+       unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
+       char *buf = c->x86_model_id;
+       const char *p = NULL;
+
+       /* Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
+          3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_bit(0*32+31, c->x86_capability);
+
+       /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
+       if ( test_bit(1*32+24, c->x86_capability) ) {
+               clear_bit(1*32+24, c->x86_capability);
+               set_bit(X86_FEATURE_CXMMX, c->x86_capability);
+       }
+
+       do_cyrix_devid(&dir0, &dir1);
+
+       check_cx686_slop(c);
+
+       Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
+       dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
+
+       /* common case step number/rev -- exceptions handled below */
+       c->x86_model = (dir1 >> 4) + 1;
+       c->x86_mask = dir1 & 0xf;
+
+       /* Now cook; the original recipe is by Channing Corn, from Cyrix.
+        * We do the same thing for each generation: we work out
+        * the model, multiplier and stepping.  Black magic included,
+        * to make the silicon step/rev numbers match the printed ones.
+        */
+        
+       switch (dir0_msn) {
+               unsigned char tmp;
+
+       case 0: /* Cx486SLC/DLC/SRx/DRx */
+               p = Cx486_name[dir0_lsn & 7];
+               break;
+
+       case 1: /* Cx486S/DX/DX2/DX4 */
+               p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
+                       : Cx486S_name[dir0_lsn & 3];
+               break;
+
+       case 2: /* 5x86 */
+               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
+               p = Cx86_cb+2;
+               break;
+
+       case 3: /* 6x86/6x86L */
+               Cx86_cb[1] = ' ';
+               Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
+               if (dir1 > 0x21) { /* 686L */
+                       Cx86_cb[0] = 'L';
+                       p = Cx86_cb;
+                       (c->x86_model)++;
+               } else             /* 686 */
+                       p = Cx86_cb+1;
+               /* Emulate MTRRs using Cyrix's ARRs. */
+               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+               /* 6x86s contain this bug */
+               c->coma_bug = 1;
+               break;
+
+       case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+#ifdef CONFIG_PCI
+       {
+               u32 vendor, device;
+               /* It isn't really a PCI quirk directly, but the cure is the
+                  same. The MediaGX has deep magic SMM stuff that handles the
+                  SB emulation. It throws away the FIFO on disable_dma(),
+                  which is wrong and ruins the audio.
+
+                  Bug 2: VSA1 has a wrap bug so that using maximum sized DMA
+                  causes bad things. According to NatSemi, VSA2 has another
+                  bug to do with 'hlt'. I've not seen any boards using VSA2
+                  and X doesn't seem to support it either, so who cares 8).
+                  VSA1 we work around however.
+               */
+
+               printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
+               isa_dma_bridge_buggy = 2;
+
+               /* We do this before the PCI layer is running. However, we
+                  are safe here as we know the bridge must be a Cyrix
+                  companion and must be present. */
+               vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
+               device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);
+
+               /*
+                *  The 5510/5520 companion chips have a funky PIT.
+                */  
+               if (vendor == PCI_VENDOR_ID_CYRIX &&
+                   (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
+                       mark_tsc_unstable("cyrix 5510/5520 detected");
+       }
+#endif
+               c->x86_cache_size = 16; /* Yep, 16K integrated cache, that's it */
+
+               /* GXm supports extended cpuid levels a la AMD */
+               if (c->cpuid_level == 2) {
+                       /* Enable cxMMX extensions (GX1 Datasheet 54) */
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+                       
+                       /*
+                        * GXm : 0x30 ... 0x5f GXm  datasheet 51
+                        * GXlv: 0x6x          GXlv datasheet 54
+                        *  ?  : 0x7x
+                        * GX1 : 0x8x          GX1  datasheet 56
+                        */
+                       if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
+                               geode_configure();
+                       get_model_name(c);  /* get CPU marketing name */
+                       return;
+               } else {        /* MediaGX */
+                       Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
+                       p = Cx86_cb+2;
+                       c->x86_model = (dir1 & 0x20) ? 1 : 2;
+               }
+               break;
+
+       case 5: /* 6x86MX/M II */
+               if (dir1 > 7) {
+                       dir0_msn++;  /* M II */
+                       /* Enable MMX extensions (App note 108) */
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+               } else {
+                       c->coma_bug = 1;      /* 6x86MX, it has the bug. */
+               }
+               tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
+               Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
+               p = Cx86_cb+tmp;
+               if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
+                       (c->x86_model)++;
+               /* Emulate MTRRs using Cyrix's ARRs. */
+               set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
+               break;
+
+       case 0xf:  /* Cyrix 486 without DEVID registers */
+               switch (dir0_lsn) {
+               case 0xd:  /* either a 486SLC or DLC w/o DEVID */
+                       dir0_msn = 0;
+                       p = Cx486_name[(c->hard_math) ? 1 : 0];
+                       break;
+
+               case 0xe:  /* a 486S A step */
+                       dir0_msn = 0;
+                       p = Cx486S_name[0];
+                       break;
+               }
+               break;
+
+       default:  /* unknown (shouldn't happen, we know everyone ;-) */
+               dir0_msn = 7;
+               break;
+       }
+       strcpy(buf, Cx86_model[dir0_msn & 7]);
+       if (p) strcat(buf, p);
+       return;
+}
+
+/*
+ * Handle National Semiconductor branded processors
+ */
+static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
+{
+       /* There may be GX1 processors in the wild that are branded
+        * NSC and not Cyrix.
+        *
+        * This function only handles the GX processor, and kicks
+        * everything else to the Cyrix init function above - that should
+        * cover any processors that might have been branded differently
+        * after NSC acquired Cyrix.
+        *
+        * If this breaks your GX1 horribly, please e-mail
+        * info-linux@ldcmail.amd.com to tell us.
+        */
+
+       /* Handle the GX (formerly known as the GX2) */
+
+       if (c->x86 == 5 && c->x86_model == 5)
+               display_cacheinfo(c);
+       else
+               init_cyrix(c);
+}
+
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2:
+ * a Cyrix won't change the flags, while other 486 chips will.  PII and
+ * PPro exhibit this behavior too, but they have cpuid available.
+ */
+static inline int test_cyrix_52div(void)
+{
+       unsigned int test;
+
+       __asm__ __volatile__(
+            "sahf\n\t"         /* clear flags (%eax = 0x0005) */
+            "div %b2\n\t"      /* divide 5 by 2 */
+            "lahf"             /* store flags into %ah */
+            : "=a" (test)
+            : "0" (5), "q" (2)
+            : "cc");
+
+       /* AH is 0x02 on Cyrix after the divide.. */
+       return (unsigned char) (test >> 8) == 0x02;
+}
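+
+/*
+ * Worked example (for illustration): %eax starts as 0x0005, so "sahf"
+ * loads AH = 0x00 into the low flags byte, leaving only the always-set
+ * bit 1 (flags byte 0x02).  "div %b2" computes AX / 2 (AL = 2, AH = 1),
+ * then "lahf" copies the flags byte back into AH.  A Cyrix leaves the
+ * flags untouched across the divide, so AH reads back as 0x02; other
+ * 486 chips clobber the flags and AH reads back as something else.
+ */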
+
+static void __cpuinit cyrix_identify(struct cpuinfo_x86 * c)
+{
+       /* Detect Cyrix with disabled CPUID */
+       if ( c->x86 == 4 && test_cyrix_52div() ) {
+               unsigned char dir0, dir1;
+               
+               strcpy(c->x86_vendor_id, "CyrixInstead");
+               c->x86_vendor = X86_VENDOR_CYRIX;
+
+               /* Actually enable cpuid on the older Cyrix: retrieve the
+                  CPU revisions and check that it is an affected model */
+               do_cyrix_devid(&dir0, &dir1);
+
+               dir0 >>= 4;
+
+               if (dir0 == 5 || dir0 == 3) {
+                       unsigned char ccr3;
+                       unsigned long flags;
+                       printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+                       local_irq_save(flags);
+                       ccr3 = getCx86(CX86_CCR3);
+                       setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
+                       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
+                       setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
+                       local_irq_restore(flags);
+               }
+       }
+}
+
+static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Cyrix",
+       .c_ident        = { "CyrixInstead" },
+       .c_init         = init_cyrix,
+       .c_identify     = cyrix_identify,
+};
+
+int __init cyrix_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
+       return 0;
+}
+
+static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
+       .c_vendor       = "NSC",
+       .c_ident        = { "Geode by NSC" },
+       .c_init         = init_nsc,
+};
+
+int __init nsc_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
+       return 0;
+}
+
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
new file mode 100644 (file)
index 0000000..dc4e081
--- /dev/null
@@ -0,0 +1,333 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/smp.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+
+#include "cpu.h"
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <mach_apic.h>
+#endif
+
+extern int trap_init_f00f_bug(void);
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask __read_mostly;
+#endif
+
+void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
+{
+       if (c->x86_vendor != X86_VENDOR_INTEL)
+               return;
+       /* Netburst reports a 64 byte clflush size, but does IO in 128 byte chunks */
+       if (c->x86 == 15 && c->x86_cache_alignment == 64)
+               c->x86_cache_alignment = 128;
+}
+
+/*
+ *     Early probe support logic for ppro memory erratum #50
+ *
+ *     This is called before we do cpu ident work
+ */
+int __cpuinit ppro_with_ram_bug(void)
+{
+       /* Uses data from early_cpu_detect now */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == 1 &&
+           boot_cpu_data.x86_mask < 8) {
+               printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * P4 Xeon errata 037 workaround.
+ * Hardware prefetcher may cause stale data to be loaded into the cache.
+ */
+static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+{
+       unsigned long lo, hi;
+
+       if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+               rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+               if ((lo & (1<<9)) == 0) {
+                       printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
+                       printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
+                       lo |= (1<<9);   /* Disable hw prefetching */
+                       wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+               }
+       }
+}
+
+
+/*
+ * find out the number of processor cores on the die
+ */
+static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       if (c->cpuid_level < 4)
+               return 1;
+
+       /* Intel has a non-standard dependency on %ecx for this CPUID level. */
+       cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
+       if (eax & 0x1f)
+               return ((eax >> 26) + 1);
+       else
+               return 1;
+}
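+
+/*
+ * Example (hypothetical leaf value): cpuid(4, 0) returning
+ * eax = 0x04000121 gives eax & 0x1f = 1 (a valid cache leaf), so the
+ * core count is (eax >> 26) + 1 = 2.
+ */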
+
+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+{
+       unsigned int l2 = 0;
+       char *p = NULL;
+
+#ifdef CONFIG_X86_F00F_BUG
+       /*
+        * All current models of Pentium and Pentium with MMX technology CPUs
+        * have the F0 0F bug, which lets nonprivileged users lock up the system.
+        * Note that the workaround should only be initialized once...
+        */
+       c->f00f_bug = 0;
+       if (!paravirt_enabled() && c->x86 == 5) {
+               static int f00f_workaround_enabled = 0;
+
+               c->f00f_bug = 1;
+               if ( !f00f_workaround_enabled ) {
+                       trap_init_f00f_bug();
+                       printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+                       f00f_workaround_enabled = 1;
+               }
+       }
+#endif
+
+       select_idle_routine(c);
+       l2 = init_intel_cacheinfo(c);
+       if (c->cpuid_level > 9) {
+               unsigned eax = cpuid_eax(10);
+               /* Check for version and the number of counters */
+               if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+                       set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
+       }
+
+       /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
+       if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+               clear_bit(X86_FEATURE_SEP, c->x86_capability);
+
+       /* Names for the Pentium II/Celeron processors 
+          detectable only by also checking the cache size.
+          Dixon is NOT a Celeron. */
+       if (c->x86 == 6) {
+               switch (c->x86_model) {
+               case 5:
+                       if (c->x86_mask == 0) {
+                               if (l2 == 0)
+                                       p = "Celeron (Covington)";
+                               else if (l2 == 256)
+                                       p = "Mobile Pentium II (Dixon)";
+                       }
+                       break;
+                       
+               case 6:
+                       if (l2 == 128)
+                               p = "Celeron (Mendocino)";
+                       else if (c->x86_mask == 0 || c->x86_mask == 5)
+                               p = "Celeron-A";
+                       break;
+                       
+               case 8:
+                       if (l2 == 128)
+                               p = "Celeron (Coppermine)";
+                       break;
+               }
+       }
+
+       if ( p )
+               strcpy(c->x86_model_id, p);
+       
+       c->x86_max_cores = num_cpu_cores(c);
+
+       detect_ht(c);
+
+       /* Work around errata */
+       Intel_errata_workarounds(c);
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+       /*
+        * Set up the preferred alignment for movsl bulk memory moves
+        */
+       switch (c->x86) {
+       case 4:         /* 486: untested */
+               break;
+       case 5:         /* Old Pentia: untested */
+               break;
+       case 6:         /* PII/PIII only like movsl with 8-byte alignment */
+               movsl_mask.mask = 7;
+               break;
+       case 15:        /* P4 is OK down to 8-byte alignment */
+               movsl_mask.mask = 7;
+               break;
+       }
+#endif
+
+       if (c->x86 == 15) {
+               set_bit(X86_FEATURE_P4, c->x86_capability);
+               set_bit(X86_FEATURE_SYNC_RDTSC, c->x86_capability);
+       }
+       if (c->x86 == 6) 
+               set_bit(X86_FEATURE_P3, c->x86_capability);
+       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+               (c->x86 == 0x6 && c->x86_model >= 0x0e))
+               set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+
+       if (cpu_has_ds) {
+               unsigned int l1;
+               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+               if (!(l1 & (1<<11)))
+                       set_bit(X86_FEATURE_BTS, c->x86_capability);
+               if (!(l1 & (1<<12)))
+                       set_bit(X86_FEATURE_PEBS, c->x86_capability);
+       }
+}
+
+static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+{
+       /* Intel PIII Tualatin. This comes in two flavours.
+        * One has 256 KB of cache, the other 512 KB. We have no way
+        * to determine which, so we use a boot-time override
+        * for the 512 KB model, and assume 256 otherwise.
+        */
+       if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+               size = 256;
+       return size;
+}
+
+static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Intel",
+       .c_ident        = { "GenuineIntel" },
+       .c_models = {
+               { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 
+                 { 
+                         [0] = "486 DX-25/33", 
+                         [1] = "486 DX-50", 
+                         [2] = "486 SX", 
+                         [3] = "486 DX/2", 
+                         [4] = "486 SL", 
+                         [5] = "486 SX/2", 
+                         [7] = "486 DX/2-WB", 
+                         [8] = "486 DX/4", 
+                         [9] = "486 DX/4-WB"
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
+                 { 
+                         [0] = "Pentium 60/66 A-step", 
+                         [1] = "Pentium 60/66", 
+                         [2] = "Pentium 75 - 200",
+                         [3] = "OverDrive PODP5V83", 
+                         [4] = "Pentium MMX",
+                         [7] = "Mobile Pentium 75 - 200", 
+                         [8] = "Mobile Pentium MMX"
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
+                 { 
+                         [0] = "Pentium Pro A-step",
+                         [1] = "Pentium Pro", 
+                         [3] = "Pentium II (Klamath)", 
+                         [4] = "Pentium II (Deschutes)", 
+                         [5] = "Pentium II (Deschutes)", 
+                         [6] = "Mobile Pentium II",
+                         [7] = "Pentium III (Katmai)", 
+                         [8] = "Pentium III (Coppermine)", 
+                         [10] = "Pentium III (Cascades)",
+                         [11] = "Pentium III (Tualatin)",
+                 }
+               },
+               { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
+                 {
+                         [0] = "Pentium 4 (Unknown)",
+                         [1] = "Pentium 4 (Willamette)",
+                         [2] = "Pentium 4 (Northwood)",
+                         [4] = "Pentium 4 (Foster)",
+                         [5] = "Pentium 4 (Foster)",
+                 }
+               },
+       },
+       .c_init         = init_intel,
+       .c_size_cache   = intel_size_cache,
+};
+
+__init int intel_cpu_init(void)
+{
+       cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
+       return 0;
+}
+
+#ifndef CONFIG_X86_CMPXCHG
+unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
+{
+       u8 prev;
+       unsigned long flags;
+
+       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+       local_irq_save(flags);
+       prev = *(u8 *)ptr;
+       if (prev == old)
+               *(u8 *)ptr = new;
+       local_irq_restore(flags);
+       return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u8);
+
+unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
+{
+       u16 prev;
+       unsigned long flags;
+
+       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+       local_irq_save(flags);
+       prev = *(u16 *)ptr;
+       if (prev == old)
+               *(u16 *)ptr = new;
+       local_irq_restore(flags);
+       return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u16);
+
+unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
+{
+       u32 prev;
+       unsigned long flags;
+
+       /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+       local_irq_save(flags);
+       prev = *(u32 *)ptr;
+       if (prev == old)
+               *(u32 *)ptr = new;
+       local_irq_restore(flags);
+       return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u32);
+#endif
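+
+/*
+ * Usage sketch (illustrative, not part of this file's interface):
+ * callers normally reach these helpers through the generic cmpxchg()
+ * wrapper, which dispatches on operand size:
+ *
+ *     u32 old = cmpxchg(&var, 0, 1);  /* cmpxchg_386_u32() on a 386 */
+ *
+ * The interrupt-disable window makes this atomic on UP only, as the
+ * comments above note.
+ */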
+
+// arch_initcall(intel_cpu_init);
+
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
new file mode 100644 (file)
index 0000000..db6c25a
--- /dev/null
@@ -0,0 +1,806 @@
+/*
+ *      Routines to identify caches on Intel CPUs.
+ *
+ *      Changes:
+ *      Venkatesh Pallipadi    : Adding cache identification through cpuid(4)
+ *             Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *     Andi Kleen / Andreas Herrmann   : CPUID4 emulation on AMD.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+#include <asm/smp.h>
+
+#define LVL_1_INST     1
+#define LVL_1_DATA     2
+#define LVL_2          3
+#define LVL_3          4
+#define LVL_TRACE      5
+
+struct _cache_table
+{
+       unsigned char descriptor;
+       char cache_type;
+       short size;
+};
+
+/* all the cache descriptor types we care about (no TLB entries) */
+static struct _cache_table cache_table[] __cpuinitdata =
+{
+       { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
+       { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
+       { 0x0a, LVL_1_DATA, 8 },        /* 2 way set assoc, 32 byte line size */
+       { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
+       { 0x22, LVL_3,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x23, LVL_3,      1024 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x25, LVL_3,      2048 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x29, LVL_3,      4096 },     /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
+       { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
+       { 0x39, LVL_2,      128 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x3a, LVL_2,      192 },      /* 6-way set assoc, sectored cache, 64 byte line size */
+       { 0x3b, LVL_2,      128 },      /* 2-way set assoc, sectored cache, 64 byte line size */
+       { 0x3c, LVL_2,      256 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x3d, LVL_2,      384 },      /* 6-way set assoc, sectored cache, 64 byte line size */
+       { 0x3e, LVL_2,      512 },      /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x41, LVL_2,      128 },      /* 4-way set assoc, 32 byte line size */
+       { 0x42, LVL_2,      256 },      /* 4-way set assoc, 32 byte line size */
+       { 0x43, LVL_2,      512 },      /* 4-way set assoc, 32 byte line size */
+       { 0x44, LVL_2,      1024 },     /* 4-way set assoc, 32 byte line size */
+       { 0x45, LVL_2,      2048 },     /* 4-way set assoc, 32 byte line size */
+       { 0x46, LVL_3,      4096 },     /* 4-way set assoc, 64 byte line size */
+       { 0x47, LVL_3,      8192 },     /* 8-way set assoc, 64 byte line size */
+       { 0x49, LVL_3,      4096 },     /* 16-way set assoc, 64 byte line size */
+       { 0x4a, LVL_3,      6144 },     /* 12-way set assoc, 64 byte line size */
+       { 0x4b, LVL_3,      8192 },     /* 16-way set assoc, 64 byte line size */
+       { 0x4c, LVL_3,     12288 },     /* 12-way set assoc, 64 byte line size */
+       { 0x4d, LVL_3,     16384 },     /* 16-way set assoc, 64 byte line size */
+       { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
+       { 0x70, LVL_TRACE,  12 },       /* 8-way set assoc */
+       { 0x71, LVL_TRACE,  16 },       /* 8-way set assoc */
+       { 0x72, LVL_TRACE,  32 },       /* 8-way set assoc */
+       { 0x73, LVL_TRACE,  64 },       /* 8-way set assoc */
+       { 0x78, LVL_2,    1024 },       /* 4-way set assoc, 64 byte line size */
+       { 0x79, LVL_2,     128 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7a, LVL_2,     256 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7b, LVL_2,     512 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7c, LVL_2,    1024 },       /* 8-way set assoc, sectored cache, 64 byte line size */
+       { 0x7d, LVL_2,    2048 },       /* 8-way set assoc, 64 byte line size */
+       { 0x7f, LVL_2,     512 },       /* 2-way set assoc, 64 byte line size */
+       { 0x82, LVL_2,     256 },       /* 8-way set assoc, 32 byte line size */
+       { 0x83, LVL_2,     512 },       /* 8-way set assoc, 32 byte line size */
+       { 0x84, LVL_2,    1024 },       /* 8-way set assoc, 32 byte line size */
+       { 0x85, LVL_2,    2048 },       /* 8-way set assoc, 32 byte line size */
+       { 0x86, LVL_2,     512 },       /* 4-way set assoc, 64 byte line size */
+       { 0x87, LVL_2,    1024 },       /* 8-way set assoc, 64 byte line size */
+       { 0x00, 0, 0}
+};
+
+
+enum _cache_type
+{
+       CACHE_TYPE_NULL = 0,
+       CACHE_TYPE_DATA = 1,
+       CACHE_TYPE_INST = 2,
+       CACHE_TYPE_UNIFIED = 3
+};
+
+union _cpuid4_leaf_eax {
+       struct {
+               enum _cache_type        type:5;
+               unsigned int            level:3;
+               unsigned int            is_self_initializing:1;
+               unsigned int            is_fully_associative:1;
+               unsigned int            reserved:4;
+               unsigned int            num_threads_sharing:12;
+               unsigned int            num_cores_on_die:6;
+       } split;
+       u32 full;
+};
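+
+/*
+ * Example decode (hypothetical value): eax = 0x04000121 splits into
+ * type = 1 (data), level = 1, is_self_initializing = 1,
+ * num_threads_sharing = 0 (i.e. 1 thread) and num_cores_on_die = 1
+ * (i.e. 2 cores) - a per-core L1 data cache.
+ */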
+
+union _cpuid4_leaf_ebx {
+       struct {
+               unsigned int            coherency_line_size:12;
+               unsigned int            physical_line_partition:10;
+               unsigned int            ways_of_associativity:10;
+       } split;
+       u32 full;
+};
+
+union _cpuid4_leaf_ecx {
+       struct {
+               unsigned int            number_of_sets:32;
+       } split;
+       u32 full;
+};
+
+struct _cpuid4_info {
+       union _cpuid4_leaf_eax eax;
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+       cpumask_t shared_cpu_map;
+};
+
+unsigned short                 num_cache_leaves;
+
+/* AMD doesn't have CPUID4. Emulate it here to report the same
+   information to the user.  This makes some assumptions about the machine:
+   L2 not shared, no SMT etc. that are currently true on AMD CPUs.
+
+   In theory the TLBs could be reported as a fake cache type (they are in
+   "dummy").  Maybe later. */
+union l1_cache {
+       struct {
+               unsigned line_size : 8;
+               unsigned lines_per_tag : 8;
+               unsigned assoc : 8;
+               unsigned size_in_kb : 8;
+       };
+       unsigned val;
+};
+
+union l2_cache {
+       struct {
+               unsigned line_size : 8;
+               unsigned lines_per_tag : 4;
+               unsigned assoc : 4;
+               unsigned size_in_kb : 16;
+       };
+       unsigned val;
+};
+
+union l3_cache {
+       struct {
+               unsigned line_size : 8;
+               unsigned lines_per_tag : 4;
+               unsigned assoc : 4;
+               unsigned res : 2;
+               unsigned size_encoded : 14;
+       };
+       unsigned val;
+};
+
+static const unsigned short assocs[] = {
+       [1] = 1, [2] = 2, [4] = 4, [6] = 8,
+       [8] = 16, [0xa] = 32, [0xb] = 48,
+       [0xc] = 64,
+       [0xf] = 0xffff /* fully associative - no way to encode this */
+};
+
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
+
+static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+                      union _cpuid4_leaf_ebx *ebx,
+                      union _cpuid4_leaf_ecx *ecx)
+{
+       unsigned dummy;
+       unsigned line_size, lines_per_tag, assoc, size_in_kb;
+       union l1_cache l1i, l1d;
+       union l2_cache l2;
+       union l3_cache l3;
+       union l1_cache *l1 = &l1d;
+
+       eax->full = 0;
+       ebx->full = 0;
+       ecx->full = 0;
+
+       cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
+       cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
+
+       switch (leaf) {
+       case 1:
+               l1 = &l1i;
+               /* fall through */
+       case 0:
+               if (!l1->val)
+                       return;
+               assoc = l1->assoc;
+               line_size = l1->line_size;
+               lines_per_tag = l1->lines_per_tag;
+               size_in_kb = l1->size_in_kb;
+               break;
+       case 2:
+               if (!l2.val)
+                       return;
+               assoc = l2.assoc;
+               line_size = l2.line_size;
+               lines_per_tag = l2.lines_per_tag;
+               /* cpu_data has errata corrections for K7 applied */
+               size_in_kb = current_cpu_data.x86_cache_size;
+               break;
+       case 3:
+               if (!l3.val)
+                       return;
+               assoc = l3.assoc;
+               line_size = l3.line_size;
+               lines_per_tag = l3.lines_per_tag;
+               size_in_kb = l3.size_encoded * 512;
+               break;
+       default:
+               return;
+       }
+
+       eax->split.is_self_initializing = 1;
+       eax->split.type = types[leaf];
+       eax->split.level = levels[leaf];
+       if (leaf == 3)
+               eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
+       else
+               eax->split.num_threads_sharing = 0;
+       eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+
+       if (assoc == 0xf)
+               eax->split.is_fully_associative = 1;
+       ebx->split.coherency_line_size = line_size - 1;
+       ebx->split.ways_of_associativity = assocs[assoc] - 1;
+       ebx->split.physical_line_partition = lines_per_tag - 1;
+       ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+               (ebx->split.ways_of_associativity + 1) - 1;
+}
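+
+/*
+ * Illustrative numbers: an L3 with size_encoded = 2 reports
+ * 2 * 512 = 1024 KB, and assoc = 6 maps through assocs[] to 8 ways,
+ * stored as ways_of_associativity = 7.
+ */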
+
+static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+       union _cpuid4_leaf_eax  eax;
+       union _cpuid4_leaf_ebx  ebx;
+       union _cpuid4_leaf_ecx  ecx;
+       unsigned                edx;
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               amd_cpuid4(index, &eax, &ebx, &ecx);
+       else
+               cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full,  &edx);
+       if (eax.split.type == CACHE_TYPE_NULL)
+               return -EIO; /* better error ? */
+
+       this_leaf->eax = eax;
+       this_leaf->ebx = ebx;
+       this_leaf->ecx = ecx;
+       this_leaf->size = (ecx.split.number_of_sets + 1) *
+               (ebx.split.coherency_line_size + 1) *
+               (ebx.split.physical_line_partition + 1) *
+               (ebx.split.ways_of_associativity + 1);
+       return 0;
+}
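+
+/*
+ * Example (illustrative): a leaf reporting 64 sets, 64-byte lines, one
+ * line per tag and 8 ways stores each field minus one, so size becomes
+ * (63 + 1) * (63 + 1) * (0 + 1) * (7 + 1) = 32768 bytes, i.e. 32 KB.
+ */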
+
+static int __cpuinit find_num_cache_leaves(void)
+{
+       unsigned int            eax, ebx, ecx, edx;
+       union _cpuid4_leaf_eax  cache_eax;
+       int                     i = -1;
+
+       do {
+               ++i;
+               /* Do cpuid(4) loop to find out num_cache_leaves */
+               cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
+               cache_eax.full = eax;
+       } while (cache_eax.split.type != CACHE_TYPE_NULL);
+       return i;
+}
+
+unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+{
+       unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+       unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
+       unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
+       unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
+#ifdef CONFIG_X86_HT
+       unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+#endif
+
+       if (c->cpuid_level > 3) {
+               static int is_initialized;
+
+               if (is_initialized == 0) {
+                       /* Init num_cache_leaves from boot CPU */
+                       num_cache_leaves = find_num_cache_leaves();
+                       is_initialized++;
+               }
+
+               /*
+                * Whenever possible use cpuid(4), the deterministic cache
+                * parameters cpuid leaf, to find the cache details.
+                */
+               for (i = 0; i < num_cache_leaves; i++) {
+                       struct _cpuid4_info this_leaf;
+
+                       int retval;
+
+                       retval = cpuid4_cache_lookup(i, &this_leaf);
+                       if (retval >= 0) {
+                               switch(this_leaf.eax.split.level) {
+                                   case 1:
+                                       if (this_leaf.eax.split.type ==
+                                                       CACHE_TYPE_DATA)
+                                               new_l1d = this_leaf.size/1024;
+                                       else if (this_leaf.eax.split.type ==
+                                                       CACHE_TYPE_INST)
+                                               new_l1i = this_leaf.size/1024;
+                                       break;
+                                   case 2:
+                                       new_l2 = this_leaf.size/1024;
+                                       num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+                                       index_msb = get_count_order(num_threads_sharing);
+                                       l2_id = c->apicid >> index_msb;
+                                       break;
+                                   case 3:
+                                       new_l3 = this_leaf.size/1024;
+                                       num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+                                       index_msb = get_count_order(num_threads_sharing);
+                                       l3_id = c->apicid >> index_msb;
+                                       break;
+                                   default:
+                                       break;
+                               }
+                       }
+               }
+       }
+       /*
+        * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+        * trace cache
+        */
+       if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
+               /* supports eax=2  call */
+               int i, j, n;
+               int regs[4];
+               unsigned char *dp = (unsigned char *)regs;
+               int only_trace = 0;
+
+               if (num_cache_leaves != 0 && c->x86 == 15)
+                       only_trace = 1;
+
+               /* Number of times to iterate */
+               n = cpuid_eax(2) & 0xFF;
+
+               for ( i = 0 ; i < n ; i++ ) {
+                       cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
+
+                       /* If bit 31 is set, this is an unknown format */
+                       for ( j = 0 ; j < 3 ; j++ ) {
+                               if ( regs[j] < 0 ) regs[j] = 0;
+                       }
+
+                       /* Byte 0 is level count, not a descriptor */
+                       for ( j = 1 ; j < 16 ; j++ ) {
+                               unsigned char des = dp[j];
+                               unsigned char k = 0;
+
+                               /* look up this descriptor in the table */
+                               while (cache_table[k].descriptor != 0)
+                               {
+                                       if (cache_table[k].descriptor == des) {
+                                               if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+                                                       break;
+                                               switch (cache_table[k].cache_type) {
+                                               case LVL_1_INST:
+                                                       l1i += cache_table[k].size;
+                                                       break;
+                                               case LVL_1_DATA:
+                                                       l1d += cache_table[k].size;
+                                                       break;
+                                               case LVL_2:
+                                                       l2 += cache_table[k].size;
+                                                       break;
+                                               case LVL_3:
+                                                       l3 += cache_table[k].size;
+                                                       break;
+                                               case LVL_TRACE:
+                                                       trace += cache_table[k].size;
+                                                       break;
+                                               }
+
+                                               break;
+                                       }
+
+                                       k++;
+                               }
+                       }
+               }
+       }
+
+       if (new_l1d)
+               l1d = new_l1d;
+
+       if (new_l1i)
+               l1i = new_l1i;
+
+       if (new_l2) {
+               l2 = new_l2;
+#ifdef CONFIG_X86_HT
+               cpu_llc_id[cpu] = l2_id;
+#endif
+       }
+
+       if (new_l3) {
+               l3 = new_l3;
+#ifdef CONFIG_X86_HT
+               cpu_llc_id[cpu] = l3_id;
+#endif
+       }
+
+       if (trace)
+               printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+       else if ( l1i )
+               printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+
+       if (l1d)
+               printk(", L1 D cache: %dK\n", l1d);
+       else
+               printk("\n");
+
+       if (l2)
+               printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+       if (l3)
+               printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+       c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+
+       return l2;
+}
+
+/* pointer to _cpuid4_info array (for each cache leaf) */
+static struct _cpuid4_info *cpuid4_info[NR_CPUS];
+#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
+
+#ifdef CONFIG_SMP
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+       struct _cpuid4_info     *this_leaf, *sibling_leaf;
+       unsigned long num_threads_sharing;
+       int index_msb, i;
+       struct cpuinfo_x86 *c = cpu_data;
+
+       this_leaf = CPUID4_INFO_IDX(cpu, index);
+       num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
+
+       if (num_threads_sharing == 1)
+               cpu_set(cpu, this_leaf->shared_cpu_map);
+       else {
+               index_msb = get_count_order(num_threads_sharing);
+
+               for_each_online_cpu(i) {
+                       if (c[i].apicid >> index_msb ==
+                           c[cpu].apicid >> index_msb) {
+                               cpu_set(i, this_leaf->shared_cpu_map);
+                               if (i != cpu && cpuid4_info[i])  {
+                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
+                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                               }
+                       }
+               }
+       }
+}
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+       struct _cpuid4_info     *this_leaf, *sibling_leaf;
+       int sibling;
+
+       this_leaf = CPUID4_INFO_IDX(cpu, index);
+       for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+               sibling_leaf = CPUID4_INFO_IDX(sibling, index); 
+               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+       }
+}
+#else
+static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
+static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
+#endif
+
+static void free_cache_attributes(unsigned int cpu)
+{
+       kfree(cpuid4_info[cpu]);
+       cpuid4_info[cpu] = NULL;
+}
+
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
+{
+       struct _cpuid4_info     *this_leaf;
+       unsigned long           j;
+       int                     retval;
+       cpumask_t               oldmask;
+
+       if (num_cache_leaves == 0)
+               return -ENOENT;
+
+       cpuid4_info[cpu] = kzalloc(
+           sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
+       if (cpuid4_info[cpu] == NULL)
+               return -ENOMEM;
+
+       oldmask = current->cpus_allowed;
+       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       if (retval)
+               goto out;
+
+       /* Do cpuid and store the results */
+       retval = 0;
+       for (j = 0; j < num_cache_leaves; j++) {
+               this_leaf = CPUID4_INFO_IDX(cpu, j);
+               retval = cpuid4_cache_lookup(j, this_leaf);
+               if (unlikely(retval < 0))
+                       break;
+               cache_shared_cpu_map_setup(cpu, j);
+       }
+       set_cpus_allowed(current, oldmask);
+
+out:
+       if (retval)
+               free_cache_attributes(cpu);
+       return retval;
+}
+
+#ifdef CONFIG_SYSFS
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
+
+/* pointer to kobject for cpuX/cache */
+static struct kobject * cache_kobject[NR_CPUS];
+
+struct _index_kobject {
+       struct kobject kobj;
+       unsigned int cpu;
+       unsigned short index;
+};
+
+/* pointer to array of kobjects for cpuX/cache/indexY */
+static struct _index_kobject *index_kobject[NR_CPUS];
+#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))
+
+#define show_one_plus(file_name, object, val)                          \
+static ssize_t show_##file_name                                                \
+                       (struct _cpuid4_info *this_leaf, char *buf)     \
+{                                                                      \
+       return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+}
+
+show_one_plus(level, eax.split.level, 0);
+show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
+show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
+show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
+show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
+
+static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
+{
+       return sprintf (buf, "%luK\n", this_leaf->size / 1024);
+}
+
+static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
+{
+       char mask_str[NR_CPUS];
+       cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
+       return sprintf(buf, "%s\n", mask_str);
+}
+
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+{
+       switch (this_leaf->eax.split.type) {
+           case CACHE_TYPE_DATA:
+               return sprintf(buf, "Data\n");
+               break;
+           case CACHE_TYPE_INST:
+               return sprintf(buf, "Instruction\n");
+               break;
+           case CACHE_TYPE_UNIFIED:
+               return sprintf(buf, "Unified\n");
+               break;
+           default:
+               return sprintf(buf, "Unknown\n");
+               break;
+       }
+}
+
+struct _cache_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct _cpuid4_info *, char *);
+       ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
+#define define_one_ro(_name) \
+static struct _cache_attr _name = \
+       __ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(level);
+define_one_ro(type);
+define_one_ro(coherency_line_size);
+define_one_ro(physical_line_partition);
+define_one_ro(ways_of_associativity);
+define_one_ro(number_of_sets);
+define_one_ro(size);
+define_one_ro(shared_cpu_map);
+
+static struct attribute * default_attrs[] = {
+       &type.attr,
+       &level.attr,
+       &coherency_line_size.attr,
+       &physical_line_partition.attr,
+       &ways_of_associativity.attr,
+       &number_of_sets.attr,
+       &size.attr,
+       &shared_cpu_map.attr,
+       NULL
+};
+
+#define to_object(k) container_of(k, struct _index_kobject, kobj)
+#define to_attr(a) container_of(a, struct _cache_attr, attr)
+
+static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
+{
+       struct _cache_attr *fattr = to_attr(attr);
+       struct _index_kobject *this_leaf = to_object(kobj);
+       ssize_t ret;
+
+       ret = fattr->show ?
+               fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+                       buf) :
+               0;
+       return ret;
+}
+
+static ssize_t store(struct kobject * kobj, struct attribute * attr,
+                    const char * buf, size_t count)
+{
+       return 0;
+}
+
+static struct sysfs_ops sysfs_ops = {
+       .show   = show,
+       .store  = store,
+};
+
+static struct kobj_type ktype_cache = {
+       .sysfs_ops      = &sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+static struct kobj_type ktype_percpu_entry = {
+       .sysfs_ops      = &sysfs_ops,
+};
+
+static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+{
+       kfree(cache_kobject[cpu]);
+       kfree(index_kobject[cpu]);
+       cache_kobject[cpu] = NULL;
+       index_kobject[cpu] = NULL;
+       free_cache_attributes(cpu);
+}
+
+static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
+{
+       if (num_cache_leaves == 0)
+               return -ENOENT;
+
+       detect_cache_attributes(cpu);
+       if (cpuid4_info[cpu] == NULL)
+               return -ENOENT;
+
+       /* Allocate all required memory */
+       cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+       if (unlikely(cache_kobject[cpu] == NULL))
+               goto err_out;
+
+       index_kobject[cpu] = kzalloc(
+           sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
+       if (unlikely(index_kobject[cpu] == NULL))
+               goto err_out;
+
+       return 0;
+
+err_out:
+       cpuid4_cache_sysfs_exit(cpu);
+       return -ENOMEM;
+}
+
+/* Add/Remove cache interface for CPU device */
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+{
+       unsigned int cpu = sys_dev->id;
+       unsigned long i, j;
+       struct _index_kobject *this_object;
+       int retval = 0;
+
+       retval = cpuid4_cache_sysfs_init(cpu);
+       if (unlikely(retval < 0))
+               return retval;
+
+       cache_kobject[cpu]->parent = &sys_dev->kobj;
+       kobject_set_name(cache_kobject[cpu], "%s", "cache");
+       cache_kobject[cpu]->ktype = &ktype_percpu_entry;
+       retval = kobject_register(cache_kobject[cpu]);
+
+       for (i = 0; i < num_cache_leaves; i++) {
+               this_object = INDEX_KOBJECT_PTR(cpu,i);
+               this_object->cpu = cpu;
+               this_object->index = i;
+               this_object->kobj.parent = cache_kobject[cpu];
+               kobject_set_name(&(this_object->kobj), "index%1lu", i);
+               this_object->kobj.ktype = &ktype_cache;
+               retval = kobject_register(&(this_object->kobj));
+               if (unlikely(retval)) {
+                       for (j = 0; j < i; j++) {
+                               kobject_unregister(
+                                       &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
+                       }
+                       kobject_unregister(cache_kobject[cpu]);
+                       cpuid4_cache_sysfs_exit(cpu);
+                       break;
+               }
+       }
+       return retval;
+}
+
+static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
+{
+       unsigned int cpu = sys_dev->id;
+       unsigned long i;
+
+       if (cpuid4_info[cpu] == NULL)
+               return;
+       for (i = 0; i < num_cache_leaves; i++) {
+               cache_remove_shared_cpu_map(cpu, i);
+               kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+       }
+       kobject_unregister(cache_kobject[cpu]);
+       cpuid4_cache_sysfs_exit(cpu);
+       return;
+}
+
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+       struct sys_device *sys_dev;
+
+       sys_dev = get_cpu_sysdev(cpu);
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               cache_add_dev(sys_dev);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               cache_remove_dev(sys_dev);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
+       .notifier_call = cacheinfo_cpu_callback,
+};
+
+static int __cpuinit cache_sysfs_init(void)
+{
+       int i;
+
+       if (num_cache_leaves == 0)
+               return 0;
+
+       register_hotcpu_notifier(&cacheinfo_cpu_notifier);
+
+       for_each_online_cpu(i) {
+               cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
+                       (void *)(long)i);
+       }
+
+       return 0;
+}
+
+device_initcall(cache_sysfs_init);
+
+#endif
diff --git a/arch/x86/kernel/cpu/nexgen.c b/arch/x86/kernel/cpu/nexgen.c
new file mode 100644 (file)
index 0000000..961fbe1
--- /dev/null
@@ -0,0 +1,60 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/processor.h>
+
+#include "cpu.h"
+
+/*
+ *     Detect a NexGen CPU running without BIOS hypercode new enough
+ *     to have CPUID. (Thanks to Herbert Oppmann)
+ */
+static int __cpuinit deep_magic_nexgen_probe(void)
+{
+       int ret;
+       
+       __asm__ __volatile__ (
+               "       movw    $0x5555, %%ax\n"
+               "       xorw    %%dx,%%dx\n"
+               "       movw    $2, %%cx\n"
+               "       divw    %%cx\n"
+               "       movl    $0, %%eax\n"
+               "       jnz     1f\n"
+               "       movl    $1, %%eax\n"
+               "1:\n" 
+               : "=a" (ret) : : "cx", "dx" );
+       return  ret;
+}
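+
+/*
+ * How the probe works (sketch): "xorw %%dx,%%dx" leaves ZF set before
+ * "divw" computes 0x5555 / 2.  A NexGen running old hypercode preserves
+ * the flags across the divide, so ZF is still set, "jnz" falls through
+ * and %eax ends up 1; other CPUs generally clobber the flags and the
+ * probe returns 0.
+ */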
+
+static void __cpuinit init_nexgen(struct cpuinfo_x86 * c)
+{
+       c->x86_cache_size = 256; /* A few had 1 MB... */
+}
+
+static void __cpuinit nexgen_identify(struct cpuinfo_x86 * c)
+{
+       /* Detect NexGen with old hypercode */
+       if ( deep_magic_nexgen_probe() ) {
+               strcpy(c->x86_vendor_id, "NexGenDriven");
+       }
+}
+
+static struct cpu_dev nexgen_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Nexgen",
+       .c_ident        = { "NexGenDriven" },
+       .c_models = {
+                       { .vendor = X86_VENDOR_NEXGEN,
+                         .family = 5,
+                         .model_names = { [1] = "Nx586" }
+                       },
+       },
+       .c_init         = init_nexgen,
+       .c_identify     = nexgen_identify,
+};
+
+int __init nexgen_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
+       return 0;
+}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
new file mode 100644 (file)
index 0000000..93fecd4
--- /dev/null
@@ -0,0 +1,713 @@
+/* local APIC based NMI watchdog for various CPUs.
+   This file also handles reservation of performance counters for coordination
+   with other users (like oprofile).
+
+   Note that these events normally don't tick when the CPU idles. This means
+   the frequency varies with CPU load.
+
+   Original code for K7/P6 written by Keith Owens */
+
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <asm/apic.h>
+#include <asm/intel_arch_perfmon.h>
+
+struct nmi_watchdog_ctlblk {
+       unsigned int cccr_msr;
+       unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
+       unsigned int evntsel_msr;  /* the MSR to select the events to handle */
+};
+
+/* Interface defining a CPU specific perfctr watchdog */
+struct wd_ops {
+       int (*reserve)(void);
+       void (*unreserve)(void);
+       int (*setup)(unsigned nmi_hz);
+       void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
+       void (*stop)(void);
+       unsigned perfctr;
+       unsigned evntsel;
+       u64 checkbit;
+};
+
+static struct wd_ops *wd_ops;
+
+/* This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
+ * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now).
+ */
+#define NMI_MAX_COUNTER_BITS 66
+
+/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
+ * evntsel_nmi_owner tracks the ownership of the event selection registers.
+ * Different performance counters / event selections may be reserved by
+ * different subsystems; this reservation system just tries to coordinate
+ * things a little.
+ */
+static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
+static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
+
+static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
+
+/* converts an msr to an appropriate reservation bit */
+static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
+{
+       /* returns the bit offset of the performance counter register */
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               return (msr - MSR_K7_PERFCTR0);
+       case X86_VENDOR_INTEL:
+               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+                       return (msr - MSR_ARCH_PERFMON_PERFCTR0);
+
+               switch (boot_cpu_data.x86) {
+               case 6:
+                       return (msr - MSR_P6_PERFCTR0);
+               case 15:
+                       return (msr - MSR_P4_BPU_PERFCTR0);
+               }
+       }
+       return 0;
+}
+
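+/*
+ * For example (illustrative values only): on AMD, MSR_K7_PERFCTR0 + 3 maps
+ * to bit 3 above; on a family 15 Intel CPU, MSR_P4_BPU_PERFCTR0 + 17 maps
+ * to bit 17.  An unrecognized MSR collapses to bit 0.
+ */
+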
+/* converts an msr to an appropriate reservation bit */
+/* returns the bit offset of the event selection register */
+static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
+{
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               return (msr - MSR_K7_EVNTSEL0);
+       case X86_VENDOR_INTEL:
+               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+                       return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
+
+               switch (boot_cpu_data.x86) {
+               case 6:
+                       return (msr - MSR_P6_EVNTSEL0);
+               case 15:
+                       return (msr - MSR_P4_BSU_ESCR0);
+               }
+       }
+       return 0;
+}
+
+/* checks availability of a counter bit (hack for oprofile) */
+int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
+{
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       return (!test_bit(counter, perfctr_nmi_owner));
+}
+
+/* checks an msr for availability */
+int avail_to_resrv_perfctr_nmi(unsigned int msr)
+{
+       unsigned int counter;
+
+       counter = nmi_perfctr_msr_to_bit(msr);
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       return (!test_bit(counter, perfctr_nmi_owner));
+}
+
+int reserve_perfctr_nmi(unsigned int msr)
+{
+       unsigned int counter;
+
+       counter = nmi_perfctr_msr_to_bit(msr);
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       if (!test_and_set_bit(counter, perfctr_nmi_owner))
+               return 1;
+       return 0;
+}
+
+void release_perfctr_nmi(unsigned int msr)
+{
+       unsigned int counter;
+
+       counter = nmi_perfctr_msr_to_bit(msr);
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       clear_bit(counter, perfctr_nmi_owner);
+}
+
+int reserve_evntsel_nmi(unsigned int msr)
+{
+       unsigned int counter;
+
+       counter = nmi_evntsel_msr_to_bit(msr);
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       if (!test_and_set_bit(counter, evntsel_nmi_owner))
+               return 1;
+       return 0;
+}
+
+void release_evntsel_nmi(unsigned int msr)
+{
+       unsigned int counter;
+
+       counter = nmi_evntsel_msr_to_bit(msr);
+       BUG_ON(counter >= NMI_MAX_COUNTER_BITS);
+
+       clear_bit(counter, evntsel_nmi_owner);
+}
+
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
+EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
+EXPORT_SYMBOL(reserve_perfctr_nmi);
+EXPORT_SYMBOL(release_perfctr_nmi);
+EXPORT_SYMBOL(reserve_evntsel_nmi);
+EXPORT_SYMBOL(release_evntsel_nmi);
+
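+/*
+ * Typical usage of the reservation API above (an illustrative sketch only,
+ * mirroring what single_msr_reserve()/single_msr_unreserve() below do; the
+ * K7 MSR names are just example arguments):
+ */
+#if 0
+static int example_reserve_one_counter(void)
+{
+       if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
+               return 0;
+       if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
+               release_perfctr_nmi(MSR_K7_PERFCTR0);
+               return 0;
+       }
+       return 1;       /* pair with release_*_nmi() when done */
+}
+#endif
+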
+void disable_lapic_nmi_watchdog(void)
+{
+       BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
+
+       if (atomic_read(&nmi_active) <= 0)
+               return;
+
+       on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+       wd_ops->unreserve();
+
+       BUG_ON(atomic_read(&nmi_active) != 0);
+}
+
+void enable_lapic_nmi_watchdog(void)
+{
+       BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);
+
+       /* are we already enabled? */
+       if (atomic_read(&nmi_active) != 0)
+               return;
+
+       /* do we have a lapic-aware wd_ops? */
+       if (!wd_ops)
+               return;
+       if (!wd_ops->reserve()) {
+               printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
+               return;
+       }
+
+       on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
+       touch_nmi_watchdog();
+}
+
+/*
+ * Activate the NMI watchdog via the local APIC.
+ */
+
+static unsigned int adjust_for_32bit_ctr(unsigned int hz)
+{
+       u64 counter_val;
+       unsigned int retval = hz;
+
+       /*
+        * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
+        * are writable, with higher bits sign extending from bit 31.
+        * So we can only program the counter with 31-bit values, and
+        * bit 31 must be set so that bits 32 and above sign-extend to 1.
+        * Find the appropriate nmi_hz
+        */
+       counter_val = (u64)cpu_khz * 1000;
+       do_div(counter_val, retval);
+       if (counter_val > 0x7fffffffULL) {
+               u64 count = (u64)cpu_khz * 1000;
+               do_div(count, 0x7fffffffUL);
+               retval = count + 1;
+       }
+       return retval;
+}
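+
+/*
+ * Worked example (assuming a hypothetical 3 GHz CPU, cpu_khz == 3000000):
+ * at nmi_hz == 1 the period would be 3*10^9 cycles, which does not fit in
+ * 31 bits, so nmi_hz is raised to 3*10^9 / 0x7fffffff + 1 == 2.
+ */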
+
+static void
+write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
+{
+       u64 count = (u64)cpu_khz * 1000;
+
+       do_div(count, nmi_hz);
+       if (descr)
+               Dprintk("setting %s to -0x%08Lx\n", descr, count);
+       wrmsrl(perfctr_msr, 0 - count);
+}
+
+static void write_watchdog_counter32(unsigned int perfctr_msr,
+               const char *descr, unsigned nmi_hz)
+{
+       u64 count = (u64)cpu_khz * 1000;
+
+       do_div(count, nmi_hz);
+       if (descr)
+               Dprintk("setting %s to -0x%08Lx\n", descr, count);
+       wrmsr(perfctr_msr, (u32)(-count), 0);
+}
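+
+/*
+ * Both helpers program the counter with a negative value, so it overflows
+ * -- and raises the performance-counter interrupt as an NMI -- after
+ * cpu_khz*1000/nmi_hz events, i.e. roughly nmi_hz times per second when
+ * counting CPU cycles.  The 32-bit variant exists for P6/ARCH_PERFMON,
+ * where only the low 32 bits of the counter are writable.
+ */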
+
+/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
+   nicely stable so there is not much variety */
+
+#define K7_EVNTSEL_ENABLE      (1 << 22)
+#define K7_EVNTSEL_INT         (1 << 20)
+#define K7_EVNTSEL_OS          (1 << 17)
+#define K7_EVNTSEL_USR         (1 << 16)
+#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING   0x76
+#define K7_NMI_EVENT           K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
+
+static int setup_k7_watchdog(unsigned nmi_hz)
+{
+       unsigned int perfctr_msr, evntsel_msr;
+       unsigned int evntsel;
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
+
+       wrmsrl(perfctr_msr, 0UL);
+
+       evntsel = K7_EVNTSEL_INT
+               | K7_EVNTSEL_OS
+               | K7_EVNTSEL_USR
+               | K7_NMI_EVENT;
+
+       /* setup the timer */
+       wrmsr(evntsel_msr, evntsel, 0);
+       write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= K7_EVNTSEL_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
+
+       wd->perfctr_msr = perfctr_msr;
+       wd->evntsel_msr = evntsel_msr;
+       wd->cccr_msr = 0;  /* unused */
+       return 1;
+}
+
+static void single_msr_stop_watchdog(void)
+{
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
+       wrmsr(wd->evntsel_msr, 0, 0);
+}
+
+static int single_msr_reserve(void)
+{
+       if (!reserve_perfctr_nmi(wd_ops->perfctr))
+               return 0;
+
+       if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
+               release_perfctr_nmi(wd_ops->perfctr);
+               return 0;
+       }
+       return 1;
+}
+
+static void single_msr_unreserve(void)
+{
+       release_evntsel_nmi(wd_ops->evntsel);
+       release_perfctr_nmi(wd_ops->perfctr);
+}
+
+static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+{
+       /* start the cycle over again */
+       write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
+}
+
+static struct wd_ops k7_wd_ops = {
+       .reserve = single_msr_reserve,
+       .unreserve = single_msr_unreserve,
+       .setup = setup_k7_watchdog,
+       .rearm = single_msr_rearm,
+       .stop = single_msr_stop_watchdog,
+       .perfctr = MSR_K7_PERFCTR0,
+       .evntsel = MSR_K7_EVNTSEL0,
+       .checkbit = 1ULL<<47,
+};
+
+/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
+
+#define P6_EVNTSEL0_ENABLE     (1 << 22)
+#define P6_EVNTSEL_INT         (1 << 20)
+#define P6_EVNTSEL_OS          (1 << 17)
+#define P6_EVNTSEL_USR         (1 << 16)
+#define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79
+#define P6_NMI_EVENT           P6_EVENT_CPU_CLOCKS_NOT_HALTED
+
+static int setup_p6_watchdog(unsigned nmi_hz)
+{
+       unsigned int perfctr_msr, evntsel_msr;
+       unsigned int evntsel;
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
+
+       /* KVM doesn't implement this MSR */
+       if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
+               return 0;
+
+       evntsel = P6_EVNTSEL_INT
+               | P6_EVNTSEL_OS
+               | P6_EVNTSEL_USR
+               | P6_NMI_EVENT;
+
+       /* setup the timer */
+       wrmsr(evntsel_msr, evntsel, 0);
+       nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+       write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= P6_EVNTSEL0_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
+
+       wd->perfctr_msr = perfctr_msr;
+       wd->evntsel_msr = evntsel_msr;
+       wd->cccr_msr = 0;  /* unused */
+       return 1;
+}
+
+static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+{
+       /* P6-based Pentium M needs to re-unmask
+        * the apic vector, but that doesn't hurt
+        * other P6 variants.
+        * ArchPerfmon/Core Duo also needs this */
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       /* P6/ARCH_PERFMON has 32 bit counter write */
+       write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
+}
+
+static struct wd_ops p6_wd_ops = {
+       .reserve = single_msr_reserve,
+       .unreserve = single_msr_unreserve,
+       .setup = setup_p6_watchdog,
+       .rearm = p6_rearm,
+       .stop = single_msr_stop_watchdog,
+       .perfctr = MSR_P6_PERFCTR0,
+       .evntsel = MSR_P6_EVNTSEL0,
+       .checkbit = 1ULL<<39,
+};
+
+/* Intel P4 performance counters. By far the most complicated of all. */
+
+#define MSR_P4_MISC_ENABLE_PERF_AVAIL  (1<<7)
+#define P4_ESCR_EVENT_SELECT(N)        ((N)<<25)
+#define P4_ESCR_OS             (1<<3)
+#define P4_ESCR_USR            (1<<2)
+#define P4_CCCR_OVF_PMI0       (1<<26)
+#define P4_CCCR_OVF_PMI1       (1<<27)
+#define P4_CCCR_THRESHOLD(N)   ((N)<<20)
+#define P4_CCCR_COMPLEMENT     (1<<19)
+#define P4_CCCR_COMPARE                (1<<18)
+#define P4_CCCR_REQUIRED       (3<<16)
+#define P4_CCCR_ESCR_SELECT(N) ((N)<<13)
+#define P4_CCCR_ENABLE         (1<<12)
+#define P4_CCCR_OVF            (1<<31)
+
+/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
+   CRU_ESCR0 (with any non-null event selector) through a complemented
+   max threshold. [IA32-Vol3, Section 14.9.9] */
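+
+/* As a cross-check of the masks above: for logical cpu 0 the cccr_val
+   assembled below works out to 0x04ff8000 before P4_CCCR_ENABLE is set
+   (a figure derived here from the defines, not taken from the patch). */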
+
+static int setup_p4_watchdog(unsigned nmi_hz)
+{
+       unsigned int perfctr_msr, evntsel_msr, cccr_msr;
+       unsigned int evntsel, cccr_val;
+       unsigned int misc_enable, dummy;
+       unsigned int ht_num;
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
+       rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
+       if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
+               return 0;
+
+#ifdef CONFIG_SMP
+       /* detect which hyperthread we are on */
+       if (smp_num_siblings == 2) {
+               unsigned int ebx, apicid;
+
+               ebx = cpuid_ebx(1);
+               apicid = (ebx >> 24) & 0xff;
+               ht_num = apicid & 1;
+       } else
+#endif
+               ht_num = 0;
+
+       /* Performance counters are shared resources, so
+        * assign each hyperthread its own set
+        * (re-use the ESCR0 register, seems safe
+        * and keeps the cccr_val the same)
+        */
+       if (!ht_num) {
+               /* logical cpu 0 */
+               perfctr_msr = MSR_P4_IQ_PERFCTR0;
+               evntsel_msr = MSR_P4_CRU_ESCR0;
+               cccr_msr = MSR_P4_IQ_CCCR0;
+               cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
+       } else {
+               /* logical cpu 1 */
+               perfctr_msr = MSR_P4_IQ_PERFCTR1;
+               evntsel_msr = MSR_P4_CRU_ESCR0;
+               cccr_msr = MSR_P4_IQ_CCCR1;
+               cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
+       }
+
+       evntsel = P4_ESCR_EVENT_SELECT(0x3F)
+               | P4_ESCR_OS
+               | P4_ESCR_USR;
+
+       cccr_val |= P4_CCCR_THRESHOLD(15)
+                | P4_CCCR_COMPLEMENT
+                | P4_CCCR_COMPARE
+                | P4_CCCR_REQUIRED;
+
+       wrmsr(evntsel_msr, evntsel, 0);
+       wrmsr(cccr_msr, cccr_val, 0);
+       write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       cccr_val |= P4_CCCR_ENABLE;
+       wrmsr(cccr_msr, cccr_val, 0);
+       wd->perfctr_msr = perfctr_msr;
+       wd->evntsel_msr = evntsel_msr;
+       wd->cccr_msr = cccr_msr;
+       return 1;
+}
+
+static void stop_p4_watchdog(void)
+{
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+       wrmsr(wd->cccr_msr, 0, 0);
+       wrmsr(wd->evntsel_msr, 0, 0);
+}
+
+static int p4_reserve(void)
+{
+       if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
+               return 0;
+#ifdef CONFIG_SMP
+       if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
+               goto fail1;
+#endif
+       if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
+               goto fail2;
+       /* RED-PEN why is ESCR1 not reserved here? */
+       return 1;
+ fail2:
+#ifdef CONFIG_SMP
+       if (smp_num_siblings > 1)
+               release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
+ fail1:
+#endif
+       release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
+       return 0;
+}
+
+static void p4_unreserve(void)
+{
+#ifdef CONFIG_SMP
+       if (smp_num_siblings > 1)
+               release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
+#endif
+       release_evntsel_nmi(MSR_P4_CRU_ESCR0);
+       release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
+}
+
+static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
+{
+       unsigned dummy;
+       /*
+        * P4 quirks:
+        * - An overflown perfctr will assert its interrupt
+        *   until the OVF flag in its CCCR is cleared.
+        * - LVTPC is masked on interrupt and must be
+        *   unmasked by the LVTPC handler.
+        */
+       rdmsrl(wd->cccr_msr, dummy);
+       dummy &= ~P4_CCCR_OVF;
+       wrmsrl(wd->cccr_msr, dummy);
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       /* start the cycle over again */
+       write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
+}
+
+static struct wd_ops p4_wd_ops = {
+       .reserve = p4_reserve,
+       .unreserve = p4_unreserve,
+       .setup = setup_p4_watchdog,
+       .rearm = p4_rearm,
+       .stop = stop_p4_watchdog,
+       /* RED-PEN this is wrong for the other sibling */
+       .perfctr = MSR_P4_BPU_PERFCTR0,
+       .evntsel = MSR_P4_BSU_ESCR0,
+       .checkbit = 1ULL<<39,
+};
+
+/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
+   all future Intel CPUs. */
+
+#define ARCH_PERFMON_NMI_EVENT_SEL     ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK   ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
+static int setup_intel_arch_watchdog(unsigned nmi_hz)
+{
+       unsigned int ebx;
+       union cpuid10_eax eax;
+       unsigned int unused;
+       unsigned int perfctr_msr, evntsel_msr;
+       unsigned int evntsel;
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+
+       /*
+        * Check whether the Architectural PerfMon supports
+        * the Unhalted Core Cycles event.
+        * NOTE: a bit value of 0 in ebx indicates that the event is present.
+        */
+       cpuid(10, &(eax.full), &ebx, &unused, &unused);
+       if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
+           (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+               return 0;
+
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
+
+       wrmsrl(perfctr_msr, 0UL);
+
+       evntsel = ARCH_PERFMON_EVENTSEL_INT
+               | ARCH_PERFMON_EVENTSEL_OS
+               | ARCH_PERFMON_EVENTSEL_USR
+               | ARCH_PERFMON_NMI_EVENT_SEL
+               | ARCH_PERFMON_NMI_EVENT_UMASK;
+
+       /* setup the timer */
+       wrmsr(evntsel_msr, evntsel, 0);
+       nmi_hz = adjust_for_32bit_ctr(nmi_hz);
+       write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
+
+       wd->perfctr_msr = perfctr_msr;
+       wd->evntsel_msr = evntsel_msr;
+       wd->cccr_msr = 0;  /* unused */
+       wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
+       return 1;
+}
+
+static struct wd_ops intel_arch_wd_ops = {
+       .reserve = single_msr_reserve,
+       .unreserve = single_msr_unreserve,
+       .setup = setup_intel_arch_watchdog,
+       .rearm = p6_rearm,
+       .stop = single_msr_stop_watchdog,
+       .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
+       .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
+};
+
+static struct wd_ops coreduo_wd_ops = {
+       .reserve = single_msr_reserve,
+       .unreserve = single_msr_unreserve,
+       .setup = setup_intel_arch_watchdog,
+       .rearm = p6_rearm,
+       .stop = single_msr_stop_watchdog,
+       .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+       .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+};
+
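+/*
+ * Selection summary (restating the switch below):
+ *   AMD family 6/15/16                  -> k7_wd_ops
+ *   Intel Core Duo (family 6, model 14) -> coreduo_wd_ops (erratum AE49)
+ *   Intel with ARCH_PERFMON             -> intel_arch_wd_ops
+ *   other Intel family 6, model <= 0xd  -> p6_wd_ops
+ *   other Intel family 15, model <= 0x4 -> p4_wd_ops
+ */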
+static void probe_nmi_watchdog(void)
+{
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
+                   boot_cpu_data.x86 != 16)
+                       return;
+               wd_ops = &k7_wd_ops;
+               break;
+       case X86_VENDOR_INTEL:
+               /* Work around Core Duo (Yonah) erratum AE49 where perfctr1
+                  doesn't have a working enable bit. */
+               if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+                       wd_ops = &coreduo_wd_ops;
+                       break;
+               }
+               if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+                       wd_ops = &intel_arch_wd_ops;
+                       break;
+               }
+               switch (boot_cpu_data.x86) {
+               case 6:
+                       if (boot_cpu_data.x86_model > 0xd)
+                               return;
+
+                       wd_ops = &p6_wd_ops;
+                       break;
+               case 15:
+                       if (boot_cpu_data.x86_model > 0x4)
+                               return;
+
+                       wd_ops = &p4_wd_ops;
+                       break;
+               default:
+                       return;
+               }
+               break;
+       }
+}
+
+/* Interface to nmi.c */
+
+int lapic_watchdog_init(unsigned nmi_hz)
+{
+       if (!wd_ops) {
+               probe_nmi_watchdog();
+               if (!wd_ops)
+                       return -1;
+
+               if (!wd_ops->reserve()) {
+                       printk(KERN_ERR
+                               "NMI watchdog: cannot reserve perfctrs\n");
+                       return -1;
+               }
+       }
+
+       if (!(wd_ops->setup(nmi_hz))) {
+               printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
+                      raw_smp_processor_id());
+               return -1;
+       }
+
+       return 0;
+}
+
+void lapic_watchdog_stop(void)
+{
+       if (wd_ops)
+               wd_ops->stop();
+}
+
+unsigned lapic_adjust_nmi_hz(unsigned hz)
+{
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+       if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
+           wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
+               hz = adjust_for_32bit_ctr(hz);
+       return hz;
+}
+
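+/*
+ * Called from the NMI handler.  The counter was programmed with a negative
+ * value, so its check bit (the sign bit) stays set while it is still
+ * counting up towards overflow; once the bit is clear, the counter wrapped,
+ * the NMI was ours, and we rearm for the next period.
+ */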
+int lapic_wd_event(unsigned nmi_hz)
+{
+       struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
+       u64 ctr;
+       rdmsrl(wd->perfctr_msr, ctr);
+       if (ctr & wd_ops->checkbit) { /* perfctr still running? */
+               return 0;
+       }
+       wd_ops->rearm(wd, nmi_hz);
+       return 1;
+}
+
+int lapic_watchdog_ok(void)
+{
+       return wd_ops != NULL;
+}
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
new file mode 100644 (file)
index 0000000..1e31b6c
--- /dev/null
@@ -0,0 +1,192 @@
+#include <linux/smp.h>
+#include <linux/timex.h>
+#include <linux/string.h>
+#include <asm/semaphore.h>
+#include <linux/seq_file.h>
+#include <linux/cpufreq.h>
+
+/*
+ *     Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+       /* 
+        * These flag bits must match the definitions in <asm/cpufeature.h>.
+        * NULL means this bit is undefined or reserved; either way it doesn't
+        * have meaning as far as Linux is concerned.  Note that it's important
+        * to realize there is a difference between this table and CPUID -- if
+        * applications want to get the raw CPUID data, they should access
+        * /dev/cpu/<cpu_nr>/cpuid instead.
+        */
+       static const char * const x86_cap_flags[] = {
+               /* Intel-defined */
+               "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
+               "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+               "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
+               "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
+
+               /* AMD-defined */
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
+               NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
+               "3dnowext", "3dnow",
+
+               /* Transmeta-defined */
+               "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+               /* Other (Linux-defined) */
+               "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
+               NULL, NULL, NULL, NULL,
+               "constant_tsc", "up", NULL, "arch_perfmon",
+               "pebs", "bts", NULL, "sync_rdtsc",
+               "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+               /* Intel-defined (#2) */
+               "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
+               "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
+               NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+               /* VIA/Cyrix/Centaur-defined */
+               NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
+               "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+               /* AMD-defined (#2) */
+               "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
+               "altmovcr8", "abm", "sse4a",
+               "misalignsse", "3dnowprefetch",
+               "osvw", "ibs", NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+               /* Auxiliary (Linux-defined) */
+               "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+               NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+       };
+       static const char * const x86_power_flags[] = {
+               "ts",   /* temperature sensor */
+               "fid",  /* frequency id control */
+               "vid",  /* voltage id control */
+               "ttp",  /* thermal trip */
+               "tm",
+               "stc",
+               "100mhzsteps",
+               "hwpstate",
+               "",     /* constant_tsc - moved to flags */
+               /* nothing */
+       };
+       struct cpuinfo_x86 *c = v;
+       int i, n = c - cpu_data;
+       int fpu_exception;
+
+#ifdef CONFIG_SMP
+       if (!cpu_online(n))
+               return 0;
+#endif
+       seq_printf(m, "processor\t: %d\n"
+               "vendor_id\t: %s\n"
+               "cpu family\t: %d\n"
+               "model\t\t: %d\n"
+               "model name\t: %s\n",
+               n,
+               c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
+               c->x86,
+               c->x86_model,
+               c->x86_model_id[0] ? c->x86_model_id : "unknown");
+
+       if (c->x86_mask || c->cpuid_level >= 0)
+               seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+       else
+               seq_printf(m, "stepping\t: unknown\n");
+
+       if ( cpu_has(c, X86_FEATURE_TSC) ) {
+               unsigned int freq = cpufreq_quick_get(n);
+               if (!freq)
+                       freq = cpu_khz;
+               seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
+                       freq / 1000, (freq % 1000));
+       }
+
+       /* Cache size */
+       if (c->x86_cache_size >= 0)
+               seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+#ifdef CONFIG_X86_HT
+       if (c->x86_max_cores * smp_num_siblings > 1) {
+               seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
+               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+               seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
+       }
+#endif
+       
+       /* We use exception 16 if we have hardware math and we've either
+        * seen it or the CPU claims it is internal */
+       fpu_exception = c->hard_math && (ignore_fpu_irq || cpu_has_fpu);
+       seq_printf(m, "fdiv_bug\t: %s\n"
+                       "hlt_bug\t\t: %s\n"
+                       "f00f_bug\t: %s\n"
+                       "coma_bug\t: %s\n"
+                       "fpu\t\t: %s\n"
+                       "fpu_exception\t: %s\n"
+                       "cpuid level\t: %d\n"
+                       "wp\t\t: %s\n"
+                       "flags\t\t:",
+                    c->fdiv_bug ? "yes" : "no",
+                    c->hlt_works_ok ? "no" : "yes",
+                    c->f00f_bug ? "yes" : "no",
+                    c->coma_bug ? "yes" : "no",
+                    c->hard_math ? "yes" : "no",
+                    fpu_exception ? "yes" : "no",
+                    c->cpuid_level,
+                    c->wp_works_ok ? "yes" : "no");
+
+       for ( i = 0 ; i < 32*NCAPINTS ; i++ )
+               if ( test_bit(i, c->x86_capability) &&
+                    x86_cap_flags[i] != NULL )
+                       seq_printf(m, " %s", x86_cap_flags[i]);
+
+       for (i = 0; i < 32; i++)
+               if (c->x86_power & (1 << i)) {
+                       if (i < ARRAY_SIZE(x86_power_flags) &&
+                           x86_power_flags[i])
+                               seq_printf(m, "%s%s",
+                                          x86_power_flags[i][0]?" ":"",
+                                          x86_power_flags[i]);
+                       else
+                               seq_printf(m, " [%d]", i);
+               }
+
+       seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
+                    c->loops_per_jiffy/(500000/HZ),
+                    (c->loops_per_jiffy/(5000/HZ)) % 100);
+       seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
+
+       return 0;
+}
+
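+/*
+ * Representative /proc/cpuinfo output (values below are from a
+ * hypothetical CPU, shown only to illustrate the format):
+ *
+ *     processor       : 0
+ *     vendor_id       : GenuineIntel
+ *     cpu family      : 6
+ *     model           : 14
+ *     stepping        : 8
+ *     cpu MHz         : 1833.000
+ *     cache size      : 2048 KB
+ *     flags           : fpu vme de pse tsc msr pae mce cx8 apic ...
+ *     bogomips        : 3665.21
+ */
+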
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+       return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       ++*pos;
+       return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+       .start  = c_start,
+       .next   = c_next,
+       .stop   = c_stop,
+       .show   = show_cpuinfo,
+};
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
new file mode 100644 (file)
index 0000000..200fb3f
--- /dev/null
@@ -0,0 +1,116 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include "cpu.h"
+
+static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
+{
+       unsigned int cap_mask, uk, max, dummy;
+       unsigned int cms_rev1, cms_rev2;
+       unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
+       char cpu_info[65];
+
+       get_model_name(c);      /* Same as AMD/Cyrix */
+       display_cacheinfo(c);
+
+       /* Print CMS and CPU revision */
+       max = cpuid_eax(0x80860000);
+       cpu_rev = 0;
+       if ( max >= 0x80860001 ) {
+               cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
+               if (cpu_rev != 0x02000000) {
+                       printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+                               (cpu_rev >> 24) & 0xff,
+                               (cpu_rev >> 16) & 0xff,
+                               (cpu_rev >> 8) & 0xff,
+                               cpu_rev & 0xff,
+                               cpu_freq);
+               }
+       }
+       if ( max >= 0x80860002 ) {
+               cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
+               if (cpu_rev == 0x02000000) {
+                       printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+                               new_cpu_rev, cpu_freq);
+               }
+               printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+                      (cms_rev1 >> 24) & 0xff,
+                      (cms_rev1 >> 16) & 0xff,
+                      (cms_rev1 >> 8) & 0xff,
+                      cms_rev1 & 0xff,
+                      cms_rev2);
+       }
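+       /*
+        * Leaves 0x80860003..0x80860006 each return 16 bytes of the
+        * processor information string in eax/ebx/ecx/edx, in the same
+        * spirit as the standard brand string in leaves 0x80000002..4.
+        */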
+       if ( max >= 0x80860006 ) {
+               cpuid(0x80860003,
+                     (void *)&cpu_info[0],
+                     (void *)&cpu_info[4],
+                     (void *)&cpu_info[8],
+                     (void *)&cpu_info[12]);
+               cpuid(0x80860004,
+                     (void *)&cpu_info[16],
+                     (void *)&cpu_info[20],
+                     (void *)&cpu_info[24],
+                     (void *)&cpu_info[28]);
+               cpuid(0x80860005,
+                     (void *)&cpu_info[32],
+                     (void *)&cpu_info[36],
+                     (void *)&cpu_info[40],
+                     (void *)&cpu_info[44]);
+               cpuid(0x80860006,
+                     (void *)&cpu_info[48],
+                     (void *)&cpu_info[52],
+                     (void *)&cpu_info[56],
+                     (void *)&cpu_info[60]);
+               cpu_info[64] = '\0';
+               printk(KERN_INFO "CPU: %s\n", cpu_info);
+       }
+
+       /* Unhide possibly hidden capability flags */
+       rdmsr(0x80860004, cap_mask, uk);
+       wrmsr(0x80860004, ~0, uk);
+       c->x86_capability[0] = cpuid_edx(0x00000001);
+       wrmsr(0x80860004, cap_mask, uk);
+
+       /* All Transmeta CPUs have a constant TSC */
+       set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+       
+       /* If we can run i686 user-space code, call us an i686 */
+#define USER686 ((1 << X86_FEATURE_TSC)|\
+                (1 << X86_FEATURE_CX8)|\
+                (1 << X86_FEATURE_CMOV))
+        if (c->x86 == 5 && (c->x86_capability[0] & USER686) == USER686)
+               c->x86 = 6;
+
+#ifdef CONFIG_SYSCTL
+       /* randomize_va_space slows us down enormously;
+          it probably triggers retranslation of x86->native bytecode */
+       randomize_va_space = 0;
+#endif
+}
+
+static void __cpuinit transmeta_identify(struct cpuinfo_x86 * c)
+{
+       u32 xlvl;
+
+       /* Transmeta-defined flags: level 0x80860001 */
+       xlvl = cpuid_eax(0x80860000);
+       if ( (xlvl & 0xffff0000) == 0x80860000 ) {
+               if ( xlvl >= 0x80860001 )
+                       c->x86_capability[2] = cpuid_edx(0x80860001);
+       }
+}
+
+static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Transmeta",
+       .c_ident        = { "GenuineTMx86", "TransmetaCPU" },
+       .c_init         = init_transmeta,
+       .c_identify     = transmeta_identify,
+};
+
+int __init transmeta_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
+       return 0;
+}
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
new file mode 100644 (file)
index 0000000..a7a4e75
--- /dev/null
@@ -0,0 +1,26 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include "cpu.h"
+
+/* UMC chips appear to be only 386 or 486, so no special init takes place.
+ */
+
+static struct cpu_dev umc_cpu_dev __cpuinitdata = {
+       .c_vendor       = "UMC",
+       .c_ident        = { "UMC UMC UMC" },
+       .c_models = {
+               { .vendor = X86_VENDOR_UMC, .family = 4, .model_names =
+                 { 
+                         [1] = "U5D", 
+                         [2] = "U5S", 
+                 }
+               },
+       },
+};
+
+int __init umc_init_cpu(void)
+{
+       cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
+       return 0;
+}
index e748050910342de949f5eab25b488dc2ba22465c..690aebf37c374d46bfd0ebb0b3982a7cbe4d4da2 100644 (file)
@@ -53,11 +53,11 @@ bootflag-y                  += ../../i386/kernel/bootflag.o
 cpuid-$(subst m,y,$(CONFIG_X86_CPUID))  += ../../i386/kernel/cpuid.o
 topology-y                     += ../../i386/kernel/topology.o
 microcode-$(subst m,y,$(CONFIG_MICROCODE))  += ../../i386/kernel/microcode.o
-intel_cacheinfo-y              += ../../i386/kernel/cpu/intel_cacheinfo.o
-addon_cpuid_features-y         += ../../i386/kernel/cpu/addon_cpuid_features.o
+intel_cacheinfo-y              += ../../x86/kernel/cpu/intel_cacheinfo.o
+addon_cpuid_features-y         += ../../x86/kernel/cpu/addon_cpuid_features.o
 quirks-y                       += ../../i386/kernel/quirks.o
 i8237-y                                += ../../i386/kernel/i8237.o
 msr-$(subst m,y,$(CONFIG_X86_MSR))  += ../../i386/kernel/msr.o
 alternative-y                  += ../../i386/kernel/alternative.o
 pcspeaker-y                    += ../../i386/kernel/pcspeaker.o
-perfctr-watchdog-y             += ../../i386/kernel/cpu/perfctr-watchdog.o
+perfctr-watchdog-y             += ../../x86/kernel/cpu/perfctr-watchdog.o