git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
bpf: Prevent memory disambiguation attack
author     Alexei Starovoitov <ast@kernel.org>
           Tue, 15 May 2018 16:27:05 +0000 (09:27 -0700)
committer  Stefan Bader <stefan.bader@canonical.com>
           Thu, 24 May 2018 07:59:19 +0000 (09:59 +0200)
Detect code patterns where malicious 'speculative store bypass' can be used
and sanitize such patterns.

 39: (bf) r3 = r10
 40: (07) r3 += -216
 41: (79) r8 = *(u64 *)(r7 +0)   // slow read
 42: (7a) *(u64 *)(r10 -72) = 0  // verifier inserts this instruction
 43: (7b) *(u64 *)(r8 +0) = r3   // this store becomes slow due to r8
 44: (79) r1 = *(u64 *)(r6 +0)   // cpu speculatively executes this load
 45: (71) r2 = *(u8 *)(r1 +0)    // speculatively arbitrary 'load byte'
                                 // is now sanitized

Above code after x86 JIT becomes:
 e5: mov    %rbp,%rdx
 e8: add    $0xffffffffffffff28,%rdx
 ef: mov    0x0(%r13),%r14
 f3: movq   $0x0,-0x48(%rbp)
 fb: mov    %rdx,0x0(%r14)
 ff: mov    0x0(%rbx),%rdi
103: movzbq 0x0(%rdi),%rsi

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
CVE-2018-3639 (x86)

(backported from commit af86ca4e3088fe5eacf2f7e58c01fa68ca067672)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
include/linux/bpf_verifier.h
kernel/bpf/verifier.c
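
The slot reuse being sanitized can be written down directly with the kernel's
BPF instruction macros. The sketch below is illustrative only and is not part
of this commit; it builds raw instructions the way
tools/testing/selftests/bpf/test_verifier.c does (that file carries its own
copy of these macros), and it assumes the program is loaded without
CAP_SYS_ADMIN, since the zeroing store is only inserted when
env->allow_ptr_leaks is false:

#include <linux/filter.h>       /* BPF_*_MEM / BPF_MOV64_* macros, BPF_REG_FP */

/* fp-8 first holds plain data (STACK_MISC), then the same slot is reused
 * to spill a pointer -- the pattern check_stack_write() flags below.
 */
static const struct bpf_insn reuse_slot[] = {
        /* *(u64 *)(fp - 8) = 0xdead : slot fp-8 becomes STACK_MISC */
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdead),
        /* r1 = fp - 8 : a PTR_TO_STACK value that will be spilled */
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
        /* *(u64 *)(fp - 8) = r1 : pointer spill into a STACK_MISC slot.
         * For an unprivileged load the verifier records
         * sanitize_stack_off = -8 here and later patches in
         * BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0) right before this store.
         */
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, -8),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
};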

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index effeaa64257d5977e0dba9c0750de4b917d5a9c5..afe5d2a6580ccd581612869b2286d4cdde10ca15 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -75,6 +75,7 @@ struct bpf_insn_aux_data {
                struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
        };
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+       int sanitize_stack_off; /* stack slot to be cleared */
        bool seen; /* this insn was processed by the verifier */
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dab5ba668b97526938b3fc541d4d14d18b069c7c..ee7c03fc3f4464b45d20dd2d317f2f4d61937300 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -568,8 +568,9 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
-static int check_stack_write(struct bpf_verifier_state *state, int off,
-                            int size, int value_regno)
+static int check_stack_write(struct bpf_verifier_env *env,
+                            struct bpf_verifier_state *state, int off,
+                            int size, int value_regno, int insn_idx)
 {
        int i;
        /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -589,8 +590,32 @@ static int check_stack_write(struct bpf_verifier_state *state, int off,
                state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
                        state->regs[value_regno];
 
-               for (i = 0; i < BPF_REG_SIZE; i++)
+               for (i = 0; i < BPF_REG_SIZE; i++) {
+                       if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
+                           !env->allow_ptr_leaks) {
+                               int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
+                               int soff = off;
+
+                               /* detected reuse of integer stack slot with a pointer
+                                * which means either llvm is reusing stack slot or
+                                * an attacker is trying to exploit CVE-2018-3639
+                                * (speculative store bypass)
+                                * Have to sanitize that slot with preemptive
+                                * store of zero.
+                                */
+                               if (*poff && *poff != soff) {
+                                       /* disallow programs where single insn stores
+                                        * into two different stack slots, since verifier
+                                        * cannot sanitize them
+                                        */
+                                       verbose("insn %d cannot access two stack slots fp%d and fp%d",
+                                               insn_idx, *poff, soff);
+                                       return -EINVAL;
+                               }
+                               *poff = soff;
+                       }
                        state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+               }
        } else {
                /* regular write of data into stack */
                state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
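
The new error path fires when the same store instruction is reached on
different verifier paths with different stack offsets, so no single inserted
zeroing store could cover it. A hypothetical shape that would hit it (again
raw instructions, not taken from this commit or from the selftests, and again
assuming an unprivileged load) is:

#include <stddef.h>             /* offsetof() */
#include <linux/bpf.h>          /* struct __sk_buff */
#include <linux/filter.h>       /* instruction macros */

/* The final STX spills the ctx pointer into fp-8 on one path and into fp-16
 * on the other; sanitize_stack_off can only remember one slot per
 * instruction, so check_stack_write() rejects the program with
 * "cannot access two stack slots".
 */
static const struct bpf_insn two_slots[] = {
        /* make fp-8 and fp-16 plain data (STACK_MISC) */
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 1),
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 1),
        /* r2 = skb->len : a scalar to branch on (r1 is the socket filter ctx) */
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct __sk_buff, len)),
        /* r3 = fp-8 or fp-16, depending on the branch taken */
        BPF_MOV64_REG(BPF_REG_3, BPF_REG_FP),
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 2),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -16),
        /* one insn, two possible slots: rejected for unprivileged loads */
        BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
};
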
@@ -944,7 +969,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                                verbose("attempt to corrupt spilled pointer on stack\n");
                                return -EACCES;
                        }
-                       err = check_stack_write(state, off, size, value_regno);
+                       err = check_stack_write(env, state, off, size,
+                                               value_regno, insn_idx);
                } else {
                        err = check_stack_read(state, off, size, value_regno);
                }
@@ -3623,6 +3649,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                else
                        continue;
 
+               if (type == BPF_WRITE &&
+                   env->insn_aux_data[i + delta].sanitize_stack_off) {
+                       struct bpf_insn patch[] = {
+                               /* Sanitize suspicious stack slot with zero.
+                                * There are no memory dependencies for this store,
+                                * since it's only using frame pointer and immediate
+                                * constant of zero
+                                */
+                               BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+                                          env->insn_aux_data[i + delta].sanitize_stack_off,
+                                          0),
+                               /* the original STX instruction will immediately
+                                * overwrite the same stack slot with appropriate value
+                                */
+                               *insn,
+                       };
+
+                       cnt = ARRAY_SIZE(patch);
+                       new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
                if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
                        continue;
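
All of this patching happens while the program is being loaded, so the path
above is exercised simply by feeding instructions such as the reuse_slot
sketch earlier to BPF_PROG_LOAD from an unprivileged process (assuming the
kernel.unprivileged_bpf_disabled sysctl is 0). A minimal, hypothetical loader
using only the raw bpf(2) syscall:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Load a raw instruction array as an unprivileged socket filter, the case
 * where allow_ptr_leaks is false and the verifier sanitizes the stack slot.
 */
static int load_prog(const struct bpf_insn *insns, unsigned int insn_cnt)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns     = (__u64)(unsigned long)insns;
        attr.insn_cnt  = insn_cnt;
        attr.license   = (__u64)(unsigned long)"GPL";

        /* >= 0: new prog fd, the verifier accepted the program (for
         * reuse_slot, after inserting the zeroing store); < 0: rejected --
         * with log_buf/log_level set, two_slots would report the
         * "cannot access two stack slots" message in the verifier log.
         */
        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}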