From e75582dd101fbf69940e724ca916180809ea6918 Mon Sep 17 00:00:00 2001
From: Elena Reshetova <elena.reshetova@intel.com>
Date: Tue, 8 Aug 2017 12:06:58 +0300
Subject: [PATCH] x86, bpf, jit: prevent speculative execution when JIT is enabled

CVE-2017-5753 (Spectre v1 Intel)

When constant blinding is enabled (bpf_jit_harden = 1), this adds an
observable speculation barrier before emitting x86 jitted code for the
BPF_ALU(64)_OR_X and BPF_ALU64_LSH_X (for the BPF_REG_AX register) eBPF
instructions. This is needed to prevent speculative execution on
out-of-bounds BPF map array indexes when the JIT is enabled. This way
arbitrary kernel memory is not exposed through side-channel attacks.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
---
 arch/x86/net/bpf_jit_comp.c | 28 +++++++++++++++++++++++++++-
 include/linux/filter.h      |  9 +++++++++
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4d50ced94686..7879fb0d7d7f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
 #include <linux/bpf.h>
 
 int bpf_jit_enable __read_mostly;
+u8 bpf_jit_fence = 0;
 
 /*
  * assembly code in arch/x86/net/bpf_jit.S
@@ -107,6 +108,18 @@ static void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+static void emit_memory_barrier(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (bpf_jit_fence)
+		EMIT3(0x0f, 0xae, 0xe8);
+
+	*pprog = prog;
+	return;
+}
+
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
@@ -399,7 +412,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ADD: b2 = 0x01; break;
 		case BPF_SUB: b2 = 0x29; break;
 		case BPF_AND: b2 = 0x21; break;
-		case BPF_OR: b2 = 0x09; break;
+		case BPF_OR: b2 = 0x09; emit_memory_barrier(&prog); break;
 		case BPF_XOR: b2 = 0x31; break;
 		}
 		if (BPF_CLASS(insn->code) == BPF_ALU64)
@@ -646,6 +659,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	case BPF_ALU64 | BPF_RSH | BPF_X:
 	case BPF_ALU64 | BPF_ARSH | BPF_X:
 
+		/* If blinding is enabled, each
+		 * BPF_LD | BPF_IMM | BPF_DW instruction
+		 * is converted to 4 eBPF instructions with
+		 * BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
+		 * always present(number 3). Detect such cases
+		 * and insert memory barriers. */
+		if ((BPF_CLASS(insn->code) == BPF_ALU64)
+		    && (BPF_OP(insn->code) == BPF_LSH)
+		    && (src_reg == BPF_REG_AX))
+			emit_memory_barrier(&prog);
 		/* check for bad case when dst_reg == rcx */
 		if (dst_reg == BPF_REG_4) {
 			/* mov r11, dst_reg */
@@ -1099,6 +1122,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	if (!bpf_jit_enable)
 		return orig_prog;
 
+	if (bpf_jit_fence_present() && bpf_jit_blinding_enabled())
+		bpf_jit_fence = 1;
+
 	tmp = bpf_jit_blind_constants(prog);
 	/* If blinding was requested and we failed during blinding,
 	 * we must fall back to the interpreter.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bfef1e5734f8..7db1781c1983 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -779,6 +779,15 @@ static inline bool bpf_jit_blinding_enabled(void)
 	return true;
 }
 
+static inline bool bpf_jit_fence_present(void)
+{
+	/* Check if lfence is present on CPU
+	 */
+	if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+		return true;
+	return false;
+}
+
 static inline bool bpf_jit_kallsyms_enabled(void)
 {
 	/* There are a couple of corner cases where kallsyms should
-- 
2.39.5
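
For readers unfamiliar with the attack shape: the emitted barrier defends
against the classic Spectre v1 bounds-check-bypass gadget. The user-space
sketch below is illustrative only and is not taken from the patch; all
names (map, probe, lookup) are hypothetical. It shows where an lfence sits
relative to the bounds check and the dependent loads.

/*
 * Illustrative user-space sketch of the Spectre v1 gadget class this
 * patch mitigates; not part of the patch itself.
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t map[256];
static uint8_t probe[256 * 4096];

uint8_t lookup(size_t idx, size_t map_size)
{
	uint8_t v = 0;

	if (idx < map_size) {
		/*
		 * Without a barrier, the CPU can execute the loads below
		 * speculatively with an out-of-bounds idx while the bounds
		 * check is still in flight, leaving a secret-dependent
		 * footprint in the cache via probe[].
		 */
		asm volatile("lfence" ::: "memory"); /* same barrier the JIT emits */
		v = probe[map[idx] * 4096];
	}
	return v;
}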
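
The three bytes passed to EMIT3() in emit_memory_barrier() are the x86
encoding of lfence (0f ae e8). As a quick sanity check, the following
stand-alone program (a sketch assuming x86-64 Linux user space, not part
of the patch) emits the same bytes plus a ret into an executable page and
calls them, much like the JIT's output is eventually run:

/*
 * Demonstrates that 0x0f 0xae 0xe8 is a complete, executable LFENCE
 * instruction; 0xc3 (ret) is appended so the buffer can be called.
 */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	static const unsigned char code[] = { 0x0f, 0xae, 0xe8, 0xc3 }; /* lfence; ret */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memcpy(buf, code, sizeof(code));
	((void (*)(void))buf)(); /* executes the barrier, then returns */
	return 0;
}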
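
On why the new ALU64/LSH/BPF_REG_AX test is used as a marker for blinded
constants: constant blinding rewrites BPF_LD | BPF_IMM | BPF_DW so that the
raw 64-bit immediate never appears in the instruction stream. The fragment
below paraphrases the rewrite in bpf_jit_blind_insn() (kernel/bpf/core.c);
the exact code varies by kernel version, so treat it as a sketch rather
than a quotation. imm_rnd is a fresh random mask, and instruction 3 is the
BPF_LSH on BPF_REG_AX that the JIT change above keys on.

	case BPF_LD | BPF_IMM | BPF_DW:
		/* 1: load the upper half, XOR-masked with imm_rnd */
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		/* 2: unmask */
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		/* 3: shift into the upper 32 bits */
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		/* 4: move into the destination register */
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;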