/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

#define RETPOLINE_THUNK_SIZE	32
/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	pause; lfence; jmp 773b;		\
772:	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	pause; lfence; jmp 775b;		\
774:	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg; jnz 771b;
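/*
 * Sizing note (informal, based on the reconstruction above): each of the
 * nr/2 loop iterations performs two calls, so it deposits two return
 * addresses into the RSB, and the trailing 'add' keeps the stack balanced so
 * those entries are never consumed by architectural RETs. With
 * nr == RSB_CLEAR_LOOPS == 32 that is 16 iterations x 2 calls == 32 entries,
 * i.e. enough to forcibly overwrite the whole buffer.
 */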
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm
/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	jmp	*%\reg
#endif
.endm
.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	call	*%\reg
#endif
.endm
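/*
 * Illustrative use from .S code (a sketch; the registers are only examples):
 * instead of writing "jmp *%rdi" or "call *%r11" directly, assembly code
 * would use
 *
 *	JMP_NOSPEC rdi
 *	CALL_NOSPEC r11
 *
 * so the indirect transfer is routed through the per-register retpoline
 * thunk, or the LFENCE-guarded form, depending on which feature bit the
 * alternatives machinery sees.
 */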
/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
.endm
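/*
 * Illustrative use (a sketch; the register and feature flag are only
 * examples): a context-switch or exit path that wants the RSB stuffed
 * would write
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * The fill is jumped over ("jmp .Lskip_rsb_\@") unless the named feature
 * bit is set, in which case the alternative patches the jump out and the
 * RSB stuffing loop runs.
 */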
#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif
/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything, it does require a stack;
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
	ANNOTATE_UNRET_END
	ALTERNATIVE_2 "",					\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,	\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
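/*
 * Placement sketch (illustrative only; the surrounding entry code is
 * hypothetical and heavily abbreviated), following the constraint above:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	# kernel page tables mapped
 *	... kernel stack set up ...
 *	UNTRAIN_RET				# before the first RET
 *	...
 *	RET
 */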
#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"
extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);
#ifdef CONFIG_RETPOLINE

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

extern retpoline_thunk_t __x86_indirect_thunk_array[];
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only available in newer GCC,
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
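/*
 * Illustrative use of CALL_NOSPEC/THUNK_TARGET from C (a sketch only; 'fn',
 * 'arg' and the output/clobber lists are hypothetical and depend on the
 * call site's calling convention):
 *
 *	long ret;
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn), "D" (arg)
 *		     : "memory", "cc");
 *
 * The retpoline or LFENCE variant is patched in by the alternatives
 * machinery according to the feature bits named above.
 */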
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};
/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};
/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}
static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);
/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
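/*
 * Illustrative pairing (a sketch; 'fw_call()' is a hypothetical stand-in
 * for an EFI/firmware call site):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = fw_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * The start/end pair brackets the firmware call so IBRS (plus an IBPB) is
 * only forced while untrusted firmware code can run; preemption stays
 * disabled between the two.
 */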
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>
/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}
/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}
/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
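/*
 * Call-site sketch (illustrative; both functions below are hypothetical
 * simplifications of the exit-to-user and idle paths):
 *
 *	static void example_exit_to_user_mode(void)
 *	{
 *		mds_user_clear_cpu_buffers();	// flush before returning to user
 *	}
 *
 *	static void example_mwait_idle(void)
 *	{
 *		mds_idle_clear_cpu_buffers();	// flush before going idle
 *		// ... enter mwait ...
 *	}
 */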
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */