Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author		Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 2 Jul 2013 23:26:44 +0000 (16:26 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 2 Jul 2013 23:26:44 +0000 (16:26 -0700)
Pull x86 FPU changes from Ingo Molnar:
 "There are two bigger changes in this tree:

   - Add an [early-use-]safe static_cpu_has() variant and other
     robustness improvements, including the new X86_DEBUG_STATIC_CPU_HAS
     configurable debugging facility, motivated by recent obscure FPU
     code bugs, by Borislav Petkov

   - Reimplement FPU detection code in C and drop the old asm code, by
     H. Peter Anvin."

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Use static_cpu_has_safe before alternatives
  x86: Add a static_cpu_has_safe variant
  x86: Sanity-check static_cpu_has usage
  x86, cpu: Add a synthetic, always true, cpu feature
  x86: Get rid of ->hard_math and all the FPU asm fu

14 files changed:
arch/x86/Kconfig.debug
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/fpu-internal.h
arch/x86/include/asm/processor.h
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/head_32.S
arch/x86/kernel/i387.c
arch/x86/kernel/xsave.c
arch/x86/lguest/boot.c
arch/x86/xen/enlighten.c

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index b6a770132b67315cc7d07b3da152f2027447e2d6..c963881de0d0bdac76923e8fa93e8cc02e6eaab4 100644
@@ -303,4 +303,14 @@ config DEBUG_NMI_SELFTEST
 
          If unsure, say N.
 
+config X86_DEBUG_STATIC_CPU_HAS
+       bool "Debug alternatives"
+       depends on DEBUG_KERNEL
+       ---help---
+         This option causes additional code to be generated which
+         fails if static_cpu_has() is used before alternatives have
+         run.
+
+         If unsure, say N.
+
 endmenu
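
For illustration, a minimal sketch of the situation this option is designed to catch; the function below is hypothetical and not part of this commit, only static_cpu_has() and X86_FEATURE_XMM are real:

#include <asm/cpufeature.h>

/* Hypothetical early-boot code -- illustrative only. */
static void __init hypothetical_early_setup(void)
{
	/*
	 * If this runs before alternative_instructions(), the debug
	 * build's extra "jmp t_warn" has not been patched out yet, so
	 * the test lands in warn_pre_alternatives() and triggers the
	 * WARN added in arch/x86/kernel/cpu/common.c below. A
	 * non-debug build would silently take the default (false)
	 * branch instead, whatever the real CPU supports.
	 */
	if (static_cpu_has(X86_FEATURE_XMM))
		/* SSE-dependent setup would go here */;
}
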
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e99ac27f95b2cbcf9c0c5458cce4a946b7917b5e..47538a61c91bfddc1eff8a6712e4f89a5b0187b4 100644
@@ -92,7 +92,7 @@
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
 #define X86_FEATURE_11AP       (3*32+19) /* "" Bad local APIC aka 11AP */
 #define X86_FEATURE_NOPL       (3*32+20) /* The NOPL (0F 1F) instructions */
-                                         /* 21 available, was AMD_C1E */
+#define X86_FEATURE_ALWAYS     (3*32+21) /* "" Always-present feature */
 #define X86_FEATURE_XTOPOLOGY  (3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC        (3*32+24) /* TSC does not stop in C states */
@@ -356,15 +356,36 @@ extern const char * const x86_power_flags[32];
 #endif /* CONFIG_X86_64 */
 
 #if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+extern bool __static_cpu_has_safe(u16 bit);
+
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
- *
  */
 static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
 #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+               /*
+                * Catch too early usage of this before alternatives
+                * have run.
+                */
+               asm goto("1: jmp %l[t_warn]\n"
+                        "2:\n"
+                        ".section .altinstructions,\"a\"\n"
+                        " .long 1b - .\n"
+                        " .long 0\n"           /* no replacement */
+                        " .word %P0\n"         /* 1: do replace */
+                        " .byte 2b - 1b\n"     /* source len */
+                        " .byte 0\n"           /* replacement len */
+                        ".previous\n"
+                        /* skipping size check since replacement size = 0 */
+                        : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+#endif
+
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
@@ -379,7 +400,13 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                return true;
        t_no:
                return false;
-#else
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+       t_warn:
+               warn_pre_alternatives();
+               return false;
+#endif
+#else /* GCC_VERSION >= 40500 */
                u8 flag;
                /* Open-coded due to __stringify() in ALTERNATIVE() */
                asm volatile("1: movb $0,%0\n"
@@ -411,11 +438,94 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                __static_cpu_has(bit) :                         \
                boot_cpu_has(bit)                               \
 )
+
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+{
+#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+/*
+ * We need to spell the jumps to the compiler because, depending on the offset,
+ * the replacement jump can be bigger than the original jump, and this we cannot
+ * have. Thus, we force the jump to the widest, 4-byte, signed relative
+ * offset even though the last would often fit in less bytes.
+ */
+               asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+                        "2:\n"
+                        ".section .altinstructions,\"a\"\n"
+                        " .long 1b - .\n"              /* src offset */
+                        " .long 3f - .\n"              /* repl offset */
+                        " .word %P1\n"                 /* always replace */
+                        " .byte 2b - 1b\n"             /* src len */
+                        " .byte 4f - 3f\n"             /* repl len */
+                        ".previous\n"
+                        ".section .altinstr_replacement,\"ax\"\n"
+                        "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
+                        "4:\n"
+                        ".previous\n"
+                        ".section .altinstructions,\"a\"\n"
+                        " .long 1b - .\n"              /* src offset */
+                        " .long 0\n"                   /* no replacement */
+                        " .word %P0\n"                 /* feature bit */
+                        " .byte 2b - 1b\n"             /* src len */
+                        " .byte 0\n"                   /* repl len */
+                        ".previous\n"
+                        : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
+                        : : t_dynamic, t_no);
+               return true;
+       t_no:
+               return false;
+       t_dynamic:
+               return __static_cpu_has_safe(bit);
+#else /* GCC_VERSION >= 40500 */
+               u8 flag;
+               /* Open-coded due to __stringify() in ALTERNATIVE() */
+               asm volatile("1: movb $2,%0\n"
+                            "2:\n"
+                            ".section .altinstructions,\"a\"\n"
+                            " .long 1b - .\n"          /* src offset */
+                            " .long 3f - .\n"          /* repl offset */
+                            " .word %P2\n"             /* always replace */
+                            " .byte 2b - 1b\n"         /* source len */
+                            " .byte 4f - 3f\n"         /* replacement len */
+                            ".previous\n"
+                            ".section .discard,\"aw\",@progbits\n"
+                            " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+                            ".previous\n"
+                            ".section .altinstr_replacement,\"ax\"\n"
+                            "3: movb $0,%0\n"
+                            "4:\n"
+                            ".previous\n"
+                            ".section .altinstructions,\"a\"\n"
+                            " .long 1b - .\n"          /* src offset */
+                            " .long 5f - .\n"          /* repl offset */
+                            " .word %P1\n"             /* feature bit */
+                            " .byte 4b - 3b\n"         /* src len */
+                            " .byte 6f - 5f\n"         /* repl len */
+                            ".previous\n"
+                            ".section .discard,\"aw\",@progbits\n"
+                            " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+                            ".previous\n"
+                            ".section .altinstr_replacement,\"ax\"\n"
+                            "5: movb $1,%0\n"
+                            "6:\n"
+                            ".previous\n"
+                            : "=qm" (flag)
+                            : "i" (bit), "i" (X86_FEATURE_ALWAYS));
+               return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+#endif
+}
+
+#define static_cpu_has_safe(bit)                               \
+(                                                              \
+       __builtin_constant_p(boot_cpu_has(bit)) ?               \
+               boot_cpu_has(bit) :                             \
+               _static_cpu_has_safe(bit)                       \
+)
 #else
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
-#define static_cpu_has(bit) boot_cpu_has(bit)
+#define static_cpu_has(bit)            boot_cpu_has(bit)
+#define static_cpu_has_safe(bit)       boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)    cpu_has(c, (bit))
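
A usage sketch of the two variants; the wrapper functions are illustrative and not part of this commit, only the macros and X86_FEATURE_FXSR are real:

#include <asm/cpufeature.h>

/* Illustrative wrappers -- not part of this commit. */
static bool have_fxsr_fast(void)
{
	/*
	 * static_cpu_has(): compiles to a jmp that alternatives patch
	 * away entirely; fastest, but only valid once alternatives
	 * have run.
	 */
	return static_cpu_has(X86_FEATURE_FXSR);
}

static bool have_fxsr_anytime(void)
{
	/*
	 * static_cpu_has_safe(): same patched fast path after
	 * alternatives run; before that, the unpatched jmp goes to
	 * t_dynamic and falls back to __static_cpu_has_safe(), i.e. a
	 * plain boot_cpu_has() lookup -- correct at any boot stage.
	 */
	return static_cpu_has_safe(X86_FEATURE_FXSR);
}
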
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e25cc33ec54d5476dba4a46337ab893785e48783..4d0bda7b11e3b6715b026f5995183ddb4e218ae1 100644
@@ -62,10 +62,8 @@ extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
 #define xstateregs_active      fpregs_active
 
 #ifdef CONFIG_MATH_EMULATION
-# define HAVE_HWFP             (boot_cpu_data.hard_math)
 extern void finit_soft_fpu(struct i387_soft_struct *soft);
 #else
-# define HAVE_HWFP             1
 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
 #endif
 
@@ -345,7 +343,7 @@ static inline void __thread_fpu_end(struct task_struct *tsk)
 
 static inline void __thread_fpu_begin(struct task_struct *tsk)
 {
-       if (!use_eager_fpu())
+       if (!static_cpu_has_safe(X86_FEATURE_EAGER_FPU))
                clts();
        __thread_set_has_fpu(tsk);
 }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5b87d52eed0b4339262f3510ce5cf1bf46992482..29937c4f6ff8d560900bfbad9a79c919a24aa710 100644
@@ -89,9 +89,9 @@ struct cpuinfo_x86 {
        char                    wp_works_ok;    /* It doesn't on 386's */
 
        /* Problems on some 486Dx4's and old 386's: */
-       char                    hard_math;
        char                    rfu;
        char                    pad0;
+       char                    pad1;
 #else
        /* Number of 4K pages in DTLB/ITLB combined(in pages): */
        int                     x86_tlbsize;
@@ -164,6 +164,7 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size()      (boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
+extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 0ef4bba2acb75d8bdade42a54cec79ad8673d09a..d67c4be3e8b1e24ec845ecdb4c5de615523cf066 100644
@@ -28,7 +28,6 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
        OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
        OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
-       OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
        OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
        OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4112be9a46591209aae8169506f11e4eaf28fe7a..03445346ee0aae247f31ebf2aef6a48be0dfce8a 100644
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static int __init no_387(char *s)
-{
-       boot_cpu_data.hard_math = 0;
-       write_cr0(X86_CR0_TS | X86_CR0_EM | X86_CR0_MP | read_cr0());
-       return 1;
-}
-
-__setup("no387", no_387);
-
 static double __initdata x = 4195835.0;
 static double __initdata y = 3145727.0;
 
@@ -44,15 +35,6 @@ static void __init check_fpu(void)
 {
        s32 fdiv_bug;
 
-       if (!boot_cpu_data.hard_math) {
-#ifndef CONFIG_MATH_EMULATION
-               pr_emerg("No coprocessor found and no math emulation present\n");
-               pr_emerg("Giving up\n");
-               for (;;) ;
-#endif
-               return;
-       }
-
        kernel_fpu_begin();
 
        /*
@@ -107,5 +89,6 @@ void __init check_bugs(void)
         * kernel_fpu_begin/end() in check_fpu() relies on the patched
         * alternative instructions.
         */
-       check_fpu();
+       if (cpu_has_fpu)
+               check_fpu();
 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 22018f70a6716e2f57012378c19df770ba607ca0..a4a07c0acb1f2e0428055b00c709ced475c41d9d 100644
@@ -711,10 +711,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                return;
 
        cpu_detect(c);
-
        get_cpu_vendor(c);
-
        get_cpu_cap(c);
+       fpu_detect(c);
 
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
@@ -724,6 +723,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        if (this_cpu->c_bsp_init)
                this_cpu->c_bsp_init(c);
+
+       setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 }
 
 void __init early_cpu_init(void)
@@ -1363,3 +1364,17 @@ void __cpuinit cpu_init(void)
        fpu_init();
 }
 #endif
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+void warn_pre_alternatives(void)
+{
+       WARN(1, "You're using static_cpu_has before alternatives have run!\n");
+}
+EXPORT_SYMBOL_GPL(warn_pre_alternatives);
+#endif
+
+inline bool __static_cpu_has_safe(u16 bit)
+{
+       return boot_cpu_has(bit);
+}
+EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
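
A condensed sketch of the boot ordering this hunk relies on (illustrative comments, call chain simplified, not verbatim kernel code):

/*
 * Condensed x86 boot ordering (illustrative):
 *
 *   setup_arch()
 *     early_cpu_init()
 *       early_identify_cpu()        // fpu_detect() probes the FPU and
 *                                   // X86_FEATURE_ALWAYS is forced on
 *   ...
 *   check_bugs()
 *     alternative_instructions()    // static_cpu_has*() sites patched;
 *                                   // the always-true feature makes the
 *                                   // unconditional-replace entries fire
 *     check_fpu()                   // safe to use kernel_fpu_begin() now
 */
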
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d048d5ca43c1c8d04772e11206f5c9a41db9dead..7582f475b1637479e9125843475f326fe05e5f71 100644
@@ -333,7 +333,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
                switch (dir0_lsn) {
                case 0xd:  /* either a 486SLC or DLC w/o DEVID */
                        dir0_msn = 0;
-                       p = Cx486_name[(c->hard_math) ? 1 : 0];
+                       p = Cx486_name[(cpu_has_fpu ? 1 : 0)];
                        break;
 
                case 0xe:  /* a 486S A step */
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 37a198bd48c8f1defb7a5a256c3ec433b13b2758..aee6317b902fb490dbc13673dce0a89ea88e2a4b 100644
@@ -37,8 +37,8 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
                   static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
                   static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
                   static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
-                  c->hard_math ? "yes" : "no",
-                  c->hard_math ? "yes" : "no",
+                  static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+                  static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
                   c->cpuid_level,
                   c->wp_works_ok ? "yes" : "no");
 }
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 73afd11799ca7c10cfa015a3b0c83435a7003e9d..e65ddc62e1137a8a6f6f4cab04bfe28b9482e905 100644
@@ -444,7 +444,6 @@ is486:
        orl %ecx,%eax
        movl %eax,%cr0
 
-       call check_x87
        lgdt early_gdt_descr
        lidt idt_descr
        ljmp $(__KERNEL_CS),$1f
@@ -467,26 +466,6 @@ is486:
        pushl $0                # fake return address for unwinder
        jmp *(initial_code)
 
-/*
- * We depend on ET to be correct. This checks for 287/387.
- */
-check_x87:
-       movb $0,X86_HARD_MATH
-       clts
-       fninit
-       fstsw %ax
-       cmpb $0,%al
-       je 1f
-       movl %cr0,%eax          /* no coprocessor: have to set bits */
-       xorl $4,%eax            /* set EM */
-       movl %eax,%cr0
-       ret
-       ALIGN
-1:     movb $1,X86_HARD_MATH
-       .byte 0xDB,0xE4         /* fsetpm for 287, ignored by 387 */
-       ret
-
-       
 #include "verify_cpu.S"
 
 /*
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index cb339097b9ea0cf4f57b2b406fa308a489cee881..b627746f6b1a81a5fc0c6e9e30adb1bb502f0b87 100644
@@ -131,7 +131,7 @@ static void __cpuinit init_thread_xstate(void)
         * xsave_init().
         */
 
-       if (!HAVE_HWFP) {
+       if (!cpu_has_fpu) {
                /*
                 * Disable xsave as we do not support it if i387
                 * emulation is enabled.
@@ -158,6 +158,14 @@ void __cpuinit fpu_init(void)
        unsigned long cr0;
        unsigned long cr4_mask = 0;
 
+#ifndef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu) {
+               pr_emerg("No FPU found and no math emulation present\n");
+               pr_emerg("Giving up\n");
+               for (;;)
+                       asm volatile("hlt");
+       }
+#endif
        if (cpu_has_fxsr)
                cr4_mask |= X86_CR4_OSFXSR;
        if (cpu_has_xmm)
@@ -167,7 +175,7 @@ void __cpuinit fpu_init(void)
 
        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
-       if (!HAVE_HWFP)
+       if (!cpu_has_fpu)
                cr0 |= X86_CR0_EM;
        write_cr0(cr0);
 
@@ -185,7 +193,7 @@ void __cpuinit fpu_init(void)
 
 void fpu_finit(struct fpu *fpu)
 {
-       if (!HAVE_HWFP) {
+       if (!cpu_has_fpu) {
                finit_soft_fpu(&fpu->state->soft);
                return;
        }
@@ -214,7 +222,7 @@ int init_fpu(struct task_struct *tsk)
        int ret;
 
        if (tsk_used_math(tsk)) {
-               if (HAVE_HWFP && tsk == current)
+               if (cpu_has_fpu && tsk == current)
                        unlazy_fpu(tsk);
                tsk->thread.fpu.last_cpu = ~0;
                return 0;
@@ -511,14 +519,13 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
        if (ret)
                return ret;
 
-       if (!HAVE_HWFP)
+       if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
-       if (!cpu_has_fxsr) {
+       if (!cpu_has_fxsr)
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.fpu.state->fsave, 0,
                                           -1);
-       }
 
        sanitize_i387_state(target);
 
@@ -545,13 +552,13 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 
        sanitize_i387_state(target);
 
-       if (!HAVE_HWFP)
+       if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
-       if (!cpu_has_fxsr) {
+       if (!cpu_has_fxsr)
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu.state->fsave, 0, -1);
-       }
+                                         &target->thread.fpu.state->fsave, 0,
+                                         -1);
 
        if (pos > 0 || count < sizeof(env))
                convert_from_fxsr(&env, target);
@@ -592,3 +599,33 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
 EXPORT_SYMBOL(dump_fpu);
 
 #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+static int __init no_387(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_FPU);
+       return 1;
+}
+
+__setup("no387", no_387);
+
+void __cpuinit fpu_detect(struct cpuinfo_x86 *c)
+{
+       unsigned long cr0;
+       u16 fsw, fcw;
+
+       fsw = fcw = 0xffff;
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+       write_cr0(cr0);
+
+       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+                    : "+m" (fsw), "+m" (fcw));
+
+       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+               set_cpu_cap(c, X86_FEATURE_FPU);
+       else
+               clear_cpu_cap(c, X86_FEATURE_FPU);
+
+       /* The final cr0 value is set in fpu_init() */
+}
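
The constants in fpu_detect() follow from x87 behavior: fsw and fcw are seeded with 0xffff and are only overwritten if an FPU actually executes the fnstsw/fnstcw stores; FNINIT on a real x87 leaves status word 0x0000 and control word 0x037f, and 0x037f & 0x103f == 0x003f. A tiny stand-alone check of that arithmetic (illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned short fsw = 0x0000;	/* x87 status word after FNINIT */
	unsigned short fcw = 0x037f;	/* x87 default control word     */

	/* The same test fpu_detect() applies -- prints "FPU" here: */
	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
		puts("FPU");
	else
		puts("no FPU");	/* untouched 0xffff seeds take this path */
	return 0;
}
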
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index ada87a329edcde71763601e19b1fc4a8c9062f3d..d6c28acdf99c19abd1f5554561b1b3030c057b3b 100644
@@ -243,7 +243,7 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
        if (!access_ok(VERIFY_WRITE, buf, size))
                return -EACCES;
 
-       if (!HAVE_HWFP)
+       if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_get(current, NULL, 0,
                        sizeof(struct user_i387_ia32_struct), NULL,
                        (struct _fpstate_ia32 __user *) buf) ? -1 : 1;
@@ -350,11 +350,10 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
        if (!used_math() && init_fpu(tsk))
                return -1;
 
-       if (!HAVE_HWFP) {
+       if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(current, NULL,
                                       0, sizeof(struct user_i387_ia32_struct),
                                       NULL, buf) != 0;
-       }
 
        if (use_xsave()) {
                struct _fpx_sw_bytes fx_sw_user;
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 7114c63f047d06432a453884c4500474ea9681ea..d482bcaf61c182d7daac37a4c833303dd38eb658 100644
@@ -1410,7 +1410,7 @@ __init void lguest_init(void)
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
 
        /* Math is always hard! */
-       new_cpu_data.hard_math = 1;
+       set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
 
        /* We don't have features.  We have puppies!  Puppies! */
 #ifdef CONFIG_X86_MCE
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a492be2635ac048a527d856b796431ccac72d9fd..2fa02bc50034773290712429f6593af6051614fa 100644
@@ -1557,7 +1557,7 @@ asmlinkage void __init xen_start_kernel(void)
 #ifdef CONFIG_X86_32
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
-       new_cpu_data.hard_math = 1;
+       set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
        new_cpu_data.wp_works_ok = 1;
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
 #endif