x86/speculation: Create spec-ctrl.h to avoid include hell
[mirror_ubuntu-artful-kernel.git] / arch / x86 / kernel / cpu / amd.c
index 3b9e220621f83c8a5161e8b57b297233370f72ea..1d36f64e15504651b9167ea94b2c85d1c725d51a 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
@@ -544,6 +545,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
+
+       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+               unsigned int bit;
+
+               switch (c->x86) {
+               case 0x15: bit = 54; break;
+               case 0x16: bit = 33; break;
+               case 0x17: bit = 10; break;
+               default: return;
+               }
+               /*
+                * Try to cache the base value so further operations can
+                * avoid RMW. If that faults, do not enable RDS.
+                */
+               if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_RDS);
+                       setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
+                       x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+               }
+       }
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
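Note (not part of the hunk above): the base value and per-family mask cached in bsp_init_amd() are consumed outside this file. A minimal sketch of the kind of helper that applies the RDS bit on top of the cached base; the name x86_amd_rds_enable() and the externs follow the rest of this patch series and are assumptions here, not something this hunk adds:

/* Sketch only: these symbols are assumed to be exported by the series. */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_rds_mask;

static void x86_amd_rds_enable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;

        /* Only touch MSR_AMD64_LS_CFG if bsp_init_amd() set the mask up. */
        if (boot_cpu_has(X86_FEATURE_AMD_RDS))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}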
@@ -760,8 +781,11 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x15: init_amd_bd(c); break;
        }
 
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
+       /*
+        * Enable workaround for FXSAVE leak on CPUs
+        * without the XSaveErPtr feature
+        */
+       if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_XSAVEERPTR))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
        cpu_detect_cache_sizes(c);
@@ -782,8 +806,32 @@ static void init_amd(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_K8);
 
        if (cpu_has(c, X86_FEATURE_XMM2)) {
-               /* MFENCE stops RDTSC speculation */
-               set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+               unsigned long long val;
+               int ret;
+
+               /*
+                * A serializing LFENCE has less overhead than MFENCE, so
+                * use it for execution serialization.  On families which
+                * don't have that MSR, LFENCE is already serializing.
+                * msr_set_bit() uses the safe accessors, too, even if the MSR
+                * is not present.
+                */
+               msr_set_bit(MSR_F10H_DECFG,
+                           MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+               /*
+                * Verify that the MSR write was successful (could be running
+                * under a hypervisor) and only then assume that LFENCE is
+                * serializing.
+                */
+               ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+               if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+                       /* A serializing LFENCE stops RDTSC speculation */
+                       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+               } else {
+                       /* MFENCE stops RDTSC speculation */
+                       set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+               }
        }
 
        /*
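For context (not part of this diff): the X86_FEATURE_LFENCE_RDTSC / X86_FEATURE_MFENCE_RDTSC capabilities chosen above are consumed through the alternatives machinery. A simplified sketch modeled on rdtsc_ordered() in <asm/msr.h>:

static __always_inline unsigned long long rdtsc_ordered(void)
{
        /*
         * Fence RDTSC against earlier instructions: patch in LFENCE where
         * it is known to be serializing, otherwise fall back to MFENCE.
         */
        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
                          "lfence", X86_FEATURE_LFENCE_RDTSC);
        return rdtsc();
}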
@@ -803,6 +851,50 @@ static void init_amd(struct cpuinfo_x86 *c)
        /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
        if (!cpu_has(c, X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+       /* AMD speculative control support */
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+               pr_info_once("FEATURE SPEC_CTRL Present\n");
+               set_ibrs_supported();
+               set_ibpb_supported();
+               if (ibrs_inuse)
+                       sysctl_ibrs_enabled = 1;
+               if (ibpb_inuse)
+                       sysctl_ibpb_enabled = 1;
+       } else if (cpu_has(c, X86_FEATURE_IBPB)) {
+               pr_info_once("FEATURE SPEC_CTRL Not Present\n");
+               pr_info_once("FEATURE IBPB Present\n");
+               set_ibpb_supported();
+               if (ibpb_inuse)
+                       sysctl_ibpb_enabled = 1;
+       } else {
+               pr_info_once("FEATURE SPEC_CTRL Not Present\n");
+               pr_info_once("FEATURE IBPB Not Present\n");
+               /*
+                * On AMD processors that do not support the speculative
+                * control features, equivalent IBPB behavior can be
+                * achieved by disabling the indirect branch predictor.
+                */
+               if (!ibpb_disabled) {
+                       u64 val;
+
+                       switch (c->x86) {
+                       case 0x10:
+                       case 0x12:
+                       case 0x16:
+                               pr_info_once("Disabling indirect branch predictor support\n");
+                               rdmsrl(MSR_F15H_IC_CFG, val);
+                               val |= MSR_F15H_IC_CFG_DIS_IND;
+                               wrmsrl(MSR_F15H_IC_CFG, val);
+                               break;
+                       }
+               }
+       }
+
+       if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
+               set_cpu_cap(c, X86_FEATURE_RDS);
+               set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+       }
 }
 
 #ifdef CONFIG_X86_32
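
For reference, the IC_CFG MSR poked in the fallback path above is defined in <asm/msr-index.h>. The MSR address below matches the standard MSR_F15H_IC_CFG definition; the DIS_IND bit position is an assumption taken from contemporaneous AMD IBPB patches and should be verified against msr-index.h in this tree:

#define MSR_F15H_IC_CFG                 0xc0011021
/* Assumed bit: disable the indirect branch predictor (verify in-tree). */
#define MSR_F15H_IC_CFG_DIS_IND         BIT_64(14)

The rdmsrl()/wrmsrl() pair in the hunk could equally be written as msr_set_bit(MSR_F15H_IC_CFG, 14), the same safe-accessor helper already used for MSR_F10H_DECFG earlier in this file.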