x86/speculation: Make "seccomp" the default mode for Speculative Store Bypass
index bf312afe5a9f664bffef4873064e4cdb6e8c4892..5ce68e3e5bc94350fd71fdfbafa1e520ec50e26c 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,50 +7,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * We define a CPP macro such that it can be used from both .S files and
- * inline assembly. It's possible to do a .macro and then include that
- * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
- */
-
-#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
-#define RSB_FILL_LOOPS         16      /* To avoid underflow */
-
-/*
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version — two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
-       mov     $(nr/2), reg;                   \
-771:                                           \
-       call    772f;                           \
-773:   /* speculation trap */                  \
-       pause;                                  \
-       lfence;                                 \
-       jmp     773b;                           \
-772:                                           \
-       call    774f;                           \
-775:   /* speculation trap */                  \
-       pause;                                  \
-       lfence;                                 \
-       jmp     775b;                           \
-774:                                           \
-       dec     reg;                            \
-       jnz     771b;                           \
-       add     $(BITS_PER_LONG/8) * nr, sp;
-
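For concreteness, the arithmetic behind the trailing 'add' (an illustrative sketch using this file's constants, not kernel code): each 'call' in the loop pushes one machine word and plants one RSB entry, so RSB_CLEAR_LOOPS = 32 entries take 16 iterations of the two-call loop and leave 32 words of dead return addresses on the stack, which that 'add' reclaims.

	/* Illustrative only, assuming x86-64 (BITS_PER_LONG == 64). */
	#define WORD_BYTES		(BITS_PER_LONG / 8)	/* 8 on x86-64 */
	#define RSB_STACK_BYTES(nr)	(WORD_BYTES * (nr))
	/* RSB_STACK_BYTES(RSB_CLEAR_LOOPS) == 8 * 32 == 256 bytes:
	 * exactly the operand of the final 'add ... sp'. */
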
 #ifdef __ASSEMBLY__
 
 /*
        .popsection
 .endm
 
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+.macro ANNOTATE_RETPOLINE_SAFE
+       .Lannotate_\@:
+       .pushsection .discard.retpoline_safe
+       _ASM_PTR .Lannotate_\@
+       .popsection
+.endm
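How the annotation is consumed: the label address recorded via _ASM_PTR lands in the (discarded) .discard.retpoline_safe section, where objtool can match it against the addresses of the indirect branches it finds. A conceptual sketch of that matching, as an assumed shape rather than objtool's actual code:

	/* Conceptual sketch only; not objtool's implementation. Each
	 * _ASM_PTR entry records the address of an indirect branch that
	 * was deliberately marked safe. */
	static bool retpoline_safe(uint64_t insn_addr,
				   const uint64_t *safe_tbl, size_t nr_entries)
	{
		for (size_t i = 0; i < nr_entries; i++)
			if (safe_tbl[i] == insn_addr)
				return true;
		return false;
	}
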
+
 /*
  * These are the bare retpoline primitives for indirect jmp and call.
  * Do not use these directly; they only exist to make the ALTERNATIVE
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(jmp *\reg),                           \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),  \
                __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
-               __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        jmp     *\reg
 #endif
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(call *\reg),                          \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
                __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
-               __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        call    *\reg
 #endif
 .endm
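The ALTERNATIVE_2 in both macros encodes a three-way, boot-time choice. Conceptually it behaves like the sketch below; the real selection happens via apply_alternatives() patching instruction bytes, and when both feature bits are set the later-listed alternative (RETPOLINE_AMD) wins:

	/* Conceptual model only; not how alternatives are implemented. */
	enum nospec_seq { PLAIN, RETPOLINE_THUNK, LFENCE_THEN_BRANCH };

	static enum nospec_seq chosen_sequence(void)
	{
		if (boot_cpu_has(X86_FEATURE_RETPOLINE_AMD))
			return LFENCE_THEN_BRANCH;	/* lfence; jmp/call *reg */
		if (boot_cpu_has(X86_FEATURE_RETPOLINE))
			return RETPOLINE_THUNK;		/* RETPOLINE_JMP/CALL */
		return PLAIN;				/* annotated jmp/call *reg */
	}
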
 
- /*
-  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
-  * monstrosity above, manually.
-  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+/* This clobbers the BX register */
+.macro FILL_RETURN_BUFFER nr:req ftr:req
 #ifdef CONFIG_RETPOLINE
-       ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
-               __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
-               \ftr
-.Lskip_rsb_\@:
+       ALTERNATIVE "", "call __clear_rsb", \ftr
 #endif
 .endm
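A minimal sketch of the equivalent C-side invocation, assuming __clear_rsb is the out-of-line RSB-stuffing routine the macro calls (implemented in assembly elsewhere in the tree). The BX clobber mirrors the comment above; shown for x86-64:

	/* Sketch only; the wrapper name is illustrative. */
	static inline void clear_rsb(void)
	{
	#ifdef CONFIG_RETPOLINE
		asm volatile(ALTERNATIVE("", "call __clear_rsb",
					 X86_FEATURE_RETPOLINE)
			     : : : "rbx", "memory");
	#endif
	}
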
 
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"
 
+#define ANNOTATE_RETPOLINE_SAFE                                        \
+       "999:\n\t"                                              \
+       ".pushsection .discard.retpoline_safe\n\t"              \
+       _ASM_PTR " 999b\n\t"                                    \
+       ".popsection\n\t"
+
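This is the inline-assembly counterpart of the .macro above: label 999 lands immediately before whatever instruction follows the annotation. A hypothetical use; 'fn' is an illustrative function pointer, not a kernel symbol, and a real call site would also declare the C-ABI register clobbers:

	static void (*fn)(void);

	static inline void call_fn_vouched(void)
	{
		/* 999's address is recorded in .discard.retpoline_safe,
		 * marking this indirect call as vouched safe. */
		asm volatile(ANNOTATE_RETPOLINE_SAFE
			     "call *%0\n\t"
			     : : "r" (fn) : "memory");
	}
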
 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
 
 /*
 # define CALL_NOSPEC                                           \
        ANNOTATE_NOSPEC_ALTERNATIVE                             \
        ALTERNATIVE(                                            \
+       ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE)
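The call-site contract, sketched below: the target is passed through the [thunk_target] operand, and %V[thunk_target] in the retpoline alternative turns the register name into the matching __x86_indirect_thunk_<reg> symbol. The wrapper and return-value handling are illustrative; real callers also list the C-ABI clobbers:

	static inline unsigned long call_nospec(unsigned long (*fn)(void))
	{
		unsigned long ret;

		asm volatile(CALL_NOSPEC
			     : "=a" (ret)
			     : [thunk_target] "r" (fn)
			     : "memory");
		return ret;
	}
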
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",    \
+# define CALL_NOSPEC                                           \
+       ALTERNATIVE(                                            \
+       ANNOTATE_RETPOLINE_SAFE                                 \
+       "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
@@ -195,24 +166,33 @@ enum spectre_v2_mitigation {
 };
 
 /*
- * On VMEXIT we must ensure that no RSB predictions learned in the guest
- * can be followed in the host, by overwriting the RSB completely. Both
- * retpoline and IBRS mitigations for Spectre v2 need this; only on future
- * CPUs with IBRS_ATT *might* it be avoided.
+ * The Intel specification for the SPEC_CTRL MSR requires that we
+ * preserve any already set reserved bits at boot time (e.g. for
+ * future additions that this kernel is not currently aware of).
+ * We then set any additional mitigation bits that we want
+ * ourselves and always use this as the base for SPEC_CTRL.
+ * We also use this when handling guest entry/exit as below.
  */
-static inline void vmexit_fill_RSB(void)
+extern void x86_spec_ctrl_set(u64);
+extern u64 x86_spec_ctrl_get_default(void);
+
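A minimal sketch of the pattern this comment describes: callers compose MSR writes on top of the preserved base value rather than starting from zero. MSR_IA32_SPEC_CTRL and SPEC_CTRL_IBRS are the real definitions from msr-index.h; the wrapper itself is illustrative:

	static inline void write_spec_ctrl_with_ibrs(void)
	{
		u64 msrval = x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS;

		wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
	}
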
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+       SPEC_STORE_BYPASS_NONE,
+       SPEC_STORE_BYPASS_DISABLE,
+       SPEC_STORE_BYPASS_PRCTL,
+       SPEC_STORE_BYPASS_SECCOMP,
+};
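Conceptually, the four modes differ in when a task runs with Speculative Store Bypass disabled; "seccomp", the new default per the commit above, is "prctl" plus automatic opt-in for seccomp threads. A sketch of the per-task effect (illustrative; the real logic lives in arch/x86/kernel/cpu/bugs.c):

	static bool ssb_disabled_for_task(enum ssb_mitigation mode,
					  bool prctl_opt_in, bool uses_seccomp)
	{
		switch (mode) {
		case SPEC_STORE_BYPASS_NONE:	return false;
		case SPEC_STORE_BYPASS_DISABLE:	return true;	/* system-wide */
		case SPEC_STORE_BYPASS_PRCTL:	return prctl_opt_in;
		case SPEC_STORE_BYPASS_SECCOMP:	return prctl_opt_in || uses_seccomp;
		}
		return false;
	}
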
+
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
-#ifdef CONFIG_RETPOLINE
-       unsigned long loops;
-
-       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-                     ALTERNATIVE("jmp 910f",
-                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-                                 X86_FEATURE_RETPOLINE)
-                     "910:"
-                     : "=r" (loops), ASM_CALL_CONSTRAINT
-                     : : "memory" );
-#endif
+       asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+               : : "c" (msr),
+                   "a" ((u32)val),
+                   "d" ((u32)(val >> 32)),
+                   [feature] "i" (feature)
+               : "memory");
 }
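The trick here is that the ALTERNATIVE body is empty unless the feature bit is set, so CPUs without the feature pay only for loading the operand registers and never execute the wrmsr; the "i" constraint with %c[feature] bakes the feature number into the alternatives table at compile time. A typical caller, modeled on the IBPB helper found in this header in kernels of this era:

	static inline void ibpb_barrier(void)
	{
		alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
				      X86_FEATURE_USE_IBPB);
	}
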
 
 #endif /* __ASSEMBLY__ */