]> git.proxmox.com Git - pve-kernel.git/blame - patches/kernel/0229-bpf-fix-branch-pruning-logic.patch
KPTI: add follow-up fixes
[pve-kernel.git] / patches / kernel / 0229-bpf-fix-branch-pruning-logic.patch
CommitLineData
321d628a
FG
1From 4cdf7c9d4e0958e38635df638229bba6f562511a Mon Sep 17 00:00:00 2001
2From: Alexei Starovoitov <ast@fb.com>
3Date: Thu, 4 Jan 2018 08:01:24 -0600
e4cdf2a5 4Subject: [PATCH 229/241] bpf: fix branch pruning logic
321d628a
FG
5MIME-Version: 1.0
6Content-Type: text/plain; charset=UTF-8
7Content-Transfer-Encoding: 8bit
8
9When the verifier detects that a register contains a runtime constant
10and it is compared with another constant, it will prune exploration
11of the branch that is guaranteed not to be taken at runtime.
12This is all correct, but a malicious program may be constructed
13in such a way that it always has a constant comparison and
14the other branch is never taken under any conditions.
15In this case such path through the program will not be explored
16by the verifier. It won't be taken at run-time either, but since
17all instructions are JITed the malicious program may cause JITs
18to complain about using reserved fields, etc.
19To fix the issue we have to track the instructions explored by
20the verifier and sanitize instructions that are dead at run time
21with NOPs. We cannot reject such dead code, since llvm generates
22it for valid C code, as it does not do as much data flow
23analysis as the verifier does.
24
25Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
26Signed-off-by: Alexei Starovoitov <ast@kernel.org>
27Acked-by: Daniel Borkmann <daniel@iogearbox.net>
28Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
29(cherry picked from commit c131187db2d3fa2f8bf32fdf4e9a4ef805168467)
30CVE-2017-17862
31Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
32Signed-off-by: Andy Whitcroft <apw@canonical.com>
33Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
34(cherry picked from commit 2df70878d072d06f5bad0db3f2ee1ed47179dff8)
35Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
36---
37 include/linux/bpf_verifier.h | 2 +-
38 kernel/bpf/verifier.c | 27 +++++++++++++++++++++++++++
39 2 files changed, 28 insertions(+), 1 deletion(-)
40
41diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
42index 8e5d31f6faef..effeaa64257d 100644
43--- a/include/linux/bpf_verifier.h
44+++ b/include/linux/bpf_verifier.h
45@@ -75,7 +75,7 @@ struct bpf_insn_aux_data {
46 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
47 };
48 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
49- int converted_op_size; /* the valid value width after perceived conversion */
50+ bool seen; /* this insn was processed by the verifier */
51 };
52
53 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
54diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
55index 4ecb2e10c5e0..dab5ba668b97 100644
56--- a/kernel/bpf/verifier.c
57+++ b/kernel/bpf/verifier.c
58@@ -3152,6 +3152,7 @@ static int do_check(struct bpf_verifier_env *env)
59 if (err)
60 return err;
61
62+ env->insn_aux_data[insn_idx].seen = true;
63 if (class == BPF_ALU || class == BPF_ALU64) {
64 err = check_alu_op(env, insn);
65 if (err)
66@@ -3342,6 +3343,7 @@ static int do_check(struct bpf_verifier_env *env)
67 return err;
68
69 insn_idx++;
70+ env->insn_aux_data[insn_idx].seen = true;
71 } else {
72 verbose("invalid BPF_LD mode\n");
73 return -EINVAL;
74@@ -3523,6 +3525,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
75 u32 off, u32 cnt)
76 {
77 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
78+ int i;
79
80 if (cnt == 1)
81 return 0;
82@@ -3532,6 +3535,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
83 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
84 memcpy(new_data + off + cnt - 1, old_data + off,
85 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
86+ for (i = off; i < off + cnt - 1; i++)
87+ new_data[i].seen = true;
88 env->insn_aux_data = new_data;
89 vfree(old_data);
90 return 0;
91@@ -3550,6 +3555,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
92 return new_prog;
93 }
94
95+/* The verifier does more data flow analysis than llvm and will not explore
96+ * branches that are dead at run time. Malicious programs can have dead code
97+ * too. Therefore replace all dead at-run-time code with nops.
98+ */
99+static void sanitize_dead_code(struct bpf_verifier_env *env)
100+{
101+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
102+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
103+ struct bpf_insn *insn = env->prog->insnsi;
104+ const int insn_cnt = env->prog->len;
105+ int i;
106+
107+ for (i = 0; i < insn_cnt; i++) {
108+ if (aux_data[i].seen)
109+ continue;
110+ memcpy(insn + i, &nop, sizeof(nop));
111+ }
112+}
113+
114 /* convert load instructions that access fields of 'struct __sk_buff'
115 * into sequence of instructions that access fields of 'struct sk_buff'
116 */
117@@ -3841,6 +3865,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
118 while (pop_stack(env, NULL) >= 0);
119 free_states(env);
120
121+ if (ret == 0)
122+ sanitize_dead_code(env);
123+
124 if (ret == 0)
125 /* program is valid, convert *(u32*)(ctx + off) accesses */
126 ret = convert_ctx_accesses(env);
127--
1282.14.2
129