From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Elena Reshetova <elena.reshetova@intel.com>
Date: Mon, 4 Sep 2017 13:11:45 +0300
Subject: [PATCH] x86, bpf, jit: prevent speculative execution when JIT is
 enabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5753
CVE-2017-5715

When constant blinding is enabled (bpf_jit_harden = 1), this adds
a generic memory barrier (lfence for Intel, mfence for AMD) before
emitting x86 jitted code for the BPF_ALU(64)_OR_X and BPF_ALU64_LSH_X
(with BPF_REG_AX as source register) eBPF instructions. This is needed
in order to prevent speculative execution on out-of-bounds BPF_MAP
array indexes when JIT is enabled. This way arbitrary kernel memory is
not exposed through side-channel attacks.

For more details, please see this Google Project Zero report: tbd

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit cf9676859a05d0d784067072e8121e63888bacc7)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/net/bpf_jit_comp.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4d50ced94686..879dbfefb66d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -107,6 +107,27 @@ static void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+static void emit_memory_barrier(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (bpf_jit_blinding_enabled()) {
+		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+			/* x86 LFENCE opcode 0F AE E8 */
+			EMIT3(0x0f, 0xae, 0xe8);
+		else if (boot_cpu_has(X86_FEATURE_MFENCE_RDTSC))
+			/* AMD MFENCE opcode 0F AE F0 */
+			EMIT3(0x0f, 0xae, 0xf0);
+		else
+			/* we should never end up here,
+			 * but if we do, better not to emit anything */
+			return;
+	}
+	*pprog = prog;
+	return;
+}
+
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
@@ -399,7 +420,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			case BPF_ADD: b2 = 0x01; break;
 			case BPF_SUB: b2 = 0x29; break;
 			case BPF_AND: b2 = 0x21; break;
-			case BPF_OR: b2 = 0x09; break;
+			case BPF_OR: b2 = 0x09; emit_memory_barrier(&prog); break;
 			case BPF_XOR: b2 = 0x31; break;
 			}
 			if (BPF_CLASS(insn->code) == BPF_ALU64)
@@ -646,6 +667,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_RSH | BPF_X:
 		case BPF_ALU64 | BPF_ARSH | BPF_X:
 
+			/* If blinding is enabled, each
+			 * BPF_LD | BPF_IMM | BPF_DW instruction
+			 * is converted to 4 eBPF instructions with
+			 * BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
+			 * always present (number 3). Detect such cases
+			 * and insert memory barriers. */
+			if ((BPF_CLASS(insn->code) == BPF_ALU64)
+			    && (BPF_OP(insn->code) == BPF_LSH)
+			    && (src_reg == BPF_REG_AX))
+				emit_memory_barrier(&prog);
 			/* check for bad case when dst_reg == rcx */
 			if (dst_reg == BPF_REG_4) {
 				/* mov r11, dst_reg */
-- 
2.14.2
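For illustration only: a minimal user-space sketch of the byte emission that emit_memory_barrier() above performs through the kernel's EMIT3() macro. The emit_barrier() helper, the image buffer and the use_lfence flag are invented names for this sketch; the actual patch selects the fence via boot_cpu_has(X86_FEATURE_LFENCE_RDTSC) / boot_cpu_has(X86_FEATURE_MFENCE_RDTSC) and emits it only when bpf_jit_blinding_enabled() is true.

#include <stdint.h>
#include <stdio.h>

/* Append a serializing fence to a JIT image buffer (sketch only). */
static void emit_barrier(uint8_t **pprog, int use_lfence)
{
	uint8_t *prog = *pprog;

	if (use_lfence) {
		/* LFENCE encoding: 0F AE E8 */
		*prog++ = 0x0f; *prog++ = 0xae; *prog++ = 0xe8;
	} else {
		/* MFENCE encoding: 0F AE F0 */
		*prog++ = 0x0f; *prog++ = 0xae; *prog++ = 0xf0;
	}
	*pprog = prog;
}

int main(void)
{
	uint8_t image[16];
	uint8_t *prog = image;

	emit_barrier(&prog, 1);	/* pretend the feature check chose LFENCE */

	for (uint8_t *p = image; p < prog; p++)
		printf("%02x ", *p);
	printf("\n");	/* prints: 0f ae e8 */
	return 0;
}

Compiled and run, the sketch prints "0f ae e8", i.e. the LFENCE bytes that the patch places in the JIT image ahead of the affected ALU_OR_X / ALU64_LSH_X instructions.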