bpf: No need to simulate speculative domain for immediates

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 361d0983fbbb0a78b201636239010596aaa7389c..63d761cdd6782198eb47f77c81597767ab7a57c9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1304,9 +1304,7 @@ static bool __reg64_bound_s32(s64 a)
 
 static bool __reg64_bound_u32(u64 a)
 {
-       if (a > U32_MIN && a < U32_MAX)
-               return true;
-       return false;
+       return a > U32_MIN && a < U32_MAX;
 }
 
 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
@@ -1317,10 +1315,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
                reg->s32_min_value = (s32)reg->smin_value;
                reg->s32_max_value = (s32)reg->smax_value;
        }
-       if (__reg64_bound_u32(reg->umin_value))
+       if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
                reg->u32_min_value = (u32)reg->umin_value;
-       if (__reg64_bound_u32(reg->umax_value))
                reg->u32_max_value = (u32)reg->umax_value;
+       }
 
        /* Intersecting with the old var_off might have improved our bounds
         * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
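
The hunk above only derives 32-bit unsigned bounds when both the 64-bit umin and umax fit into 32 bits. A minimal user-space sketch (not kernel code; the constants are made up) of why copying just one bound would be unsound:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 64-bit bounds where umin fits in 32 bits but umax does not. */
	uint64_t umin = 1, umax = (1ULL << 32) + 1;
	/* A value permitted by those bounds whose low 32 bits are 0. */
	uint64_t val = 1ULL << 32;
	uint32_t lo = (uint32_t)val;

	printf("umin=%llu umax=%llu low32(val)=%u\n",
	       (unsigned long long)umin, (unsigned long long)umax, lo);
	/* lo == 0 < umin, so setting u32_min_value = (u32)umin alone would
	 * be wrong; both bounds must fit in 32 bits before either is copied.
	 */
	return 0;
}
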
@@ -5723,41 +5721,25 @@ enum {
 };
 
 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-                             const struct bpf_reg_state *off_reg,
-                             u32 *alu_limit, u8 opcode)
+                             u32 *alu_limit, bool mask_to_left)
 {
-       bool off_is_neg = off_reg->smin_value < 0;
-       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
-                           (opcode == BPF_SUB && !off_is_neg);
-       u32 off, max = 0, ptr_limit = 0;
-
-       if (!tnum_is_const(off_reg->var_off) &&
-           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
-               return REASON_BOUNDS;
+       u32 max = 0, ptr_limit = 0;
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
                /* Offset 0 is out-of-bounds, but acceptable start for the
-                * left direction, see BPF_REG_FP.
+                * left direction, see BPF_REG_FP. Also, unknown scalar
+                * offset where we would need to deal with min/max bounds is
+                * currently prohibited for unprivileged.
                 */
                max = MAX_BPF_STACK + mask_to_left;
-               /* Indirect variable offset stack access is prohibited in
-                * unprivileged mode so it's not handled here.
-                */
-               off = ptr_reg->off + ptr_reg->var_off.value;
-               if (mask_to_left)
-                       ptr_limit = MAX_BPF_STACK + off;
-               else
-                       ptr_limit = -off - 1;
+               ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
                break;
        case PTR_TO_MAP_VALUE:
                max = ptr_reg->map_ptr->value_size;
-               if (mask_to_left) {
-                       ptr_limit = ptr_reg->umax_value + ptr_reg->off;
-               } else {
-                       off = ptr_reg->smin_value + ptr_reg->off;
-                       ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
-               }
+               ptr_limit = (mask_to_left ?
+                            ptr_reg->smin_value :
+                            ptr_reg->umax_value) + ptr_reg->off;
                break;
        default:
                return REASON_TYPE;
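
With this rework the per-type limit is expressed as the distance between the pointer and the relevant bound rather than as an absolute cap. A rough user-space sketch with made-up numbers (an fp-16 stack pointer and a 64-byte map value; the rejection of ptr_limit against max happens in code outside this hunk):

#include <stdio.h>

#define MAX_BPF_STACK 512

int main(void)
{
	/* PTR_TO_STACK at fp-16: off = -16, var_off.value assumed 0. */
	int off = -16;
	unsigned int ptr_limit = -(0 + off);      /* 16 bytes back to fp     */
	unsigned int max = MAX_BPF_STACK + 1;     /* mask_to_left assumed    */
	printf("stack: limit=%u max=%u\n", ptr_limit, max);

	/* PTR_TO_MAP_VALUE with value_size = 64, moving right: the limit is
	 * how far into the value the pointer may already be, i.e. its
	 * umax_value (assumed 40) plus the fixed offset (assumed 0).
	 */
	unsigned int value_size = 64, umax_value = 40;
	ptr_limit = umax_value + 0;
	printf("map:   limit=%u max=%u\n", ptr_limit, value_size);
	return 0;
}
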
@@ -5808,14 +5790,22 @@ static bool sanitize_needed(u8 opcode)
        return opcode == BPF_ADD || opcode == BPF_SUB;
 }
 
+struct bpf_sanitize_info {
+       struct bpf_insn_aux_data aux;
+       bool mask_to_left;
+};
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
                            const struct bpf_reg_state *off_reg,
-                           struct bpf_reg_state *dst_reg)
+                           struct bpf_reg_state *dst_reg,
+                           struct bpf_sanitize_info *info,
+                           const bool commit_window)
 {
+       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
        struct bpf_verifier_state *vstate = env->cur_state;
-       struct bpf_insn_aux_data *aux = cur_aux(env);
+       bool off_is_imm = tnum_is_const(off_reg->var_off);
        bool off_is_neg = off_reg->smin_value < 0;
        bool ptr_is_dst_reg = ptr_reg == dst_reg;
        u8 opcode = BPF_OP(insn->code);
@@ -5834,18 +5824,47 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        if (vstate->speculative)
                goto do_sim;
 
-       alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
-       alu_state |= ptr_is_dst_reg ?
-                    BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+       if (!commit_window) {
+               if (!tnum_is_const(off_reg->var_off) &&
+                   (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+                       return REASON_BOUNDS;
 
-       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+               info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+                                    (opcode == BPF_SUB && !off_is_neg);
+       }
+
+       err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
        if (err < 0)
                return err;
 
+       if (commit_window) {
+               /* In commit phase we narrow the masking window based on
+                * the observed pointer move after the simulated operation.
+                */
+               alu_state = info->aux.alu_state;
+               alu_limit = abs(info->aux.alu_limit - alu_limit);
+       } else {
+               alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+               alu_state |= ptr_is_dst_reg ?
+                            BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+       }
+
        err = update_alu_sanitation_state(aux, alu_state, alu_limit);
        if (err < 0)
                return err;
 do_sim:
+       /* If we're in commit phase, we're done here given we already
+        * pushed the truncated dst_reg into the speculative verification
+        * stack.
+        *
+        * Also, when register is a known constant, we rewrite register-based
+        * operation to immediate-based, and thus do not need masking (and as
+        * a consequence, do not need to simulate the zero-truncation either).
+        */
+       if (commit_window || off_is_imm)
+               return 0;
+
        /* Simulate and find potential out-of-bounds access under
         * speculative execution from truncation as a result of
         * masking when off was not within expected range. If off
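
The comments above describe the split into a collection pass and a commit pass: the commit pass recomputes the limit from dst_reg after the simulated pointer ALU and keeps only the absolute distance the pointer actually moved. A small stand-alone sketch with assumed numbers:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Collection pass: limit taken from ptr_reg (a map value pointer
	 * whose variable offset is still 0).
	 */
	int pre_limit = 0;
	/* Commit pass: limit recomputed from dst_reg after simulating an
	 * ADD of a scalar known to be at most 32.
	 */
	int post_limit = 32;
	/* Final bound for masking: the distance the pointer may have moved,
	 * which is tighter than the old per-type cap (e.g. the whole
	 * value_size).
	 */
	int alu_limit = abs(pre_limit - post_limit);

	printf("alu_limit=%d\n", alu_limit);
	return 0;
}
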
@@ -5988,6 +6007,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
            smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
        u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
            umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+       struct bpf_sanitize_info info = {};
        u8 opcode = BPF_OP(insn->code);
        u32 dst = insn->dst_reg;
        int ret;
@@ -6054,12 +6074,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        /* pointer types do not carry 32-bit bounds at the moment. */
        __mark_reg32_unbounded(dst_reg);
 
-       switch (opcode) {
-       case BPF_ADD:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+                                      &info, false);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, off_reg, dst_reg);
+       }
 
+       switch (opcode) {
+       case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
                 * the s32 'off' field
                 */
@@ -6110,10 +6133,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                }
                break;
        case BPF_SUB:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
-               if (ret < 0)
-                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
-
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
                        verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -6196,6 +6215,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (sanitize_check_bounds(env, insn, dst_reg) < 0)
                return -EACCES;
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+                                      &info, true);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
+       }
 
        return 0;
 }
@@ -6375,10 +6400,17 @@ static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
 {
+       bool src_known = tnum_subreg_is_const(src_reg->var_off);
+       bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
        u32 umax_val = src_reg->u32_max_value;
 
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
+               return;
+       }
+
        /* We get our minimum from the var_off, since that's inherently
         * bitwise.  Our maximum is the minimum of the operands' maxima.
         */
@@ -6397,7 +6429,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
-
 }
 
 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -6438,10 +6469,17 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
                                struct bpf_reg_state *src_reg)
 {
+       bool src_known = tnum_subreg_is_const(src_reg->var_off);
+       bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
        u32 umin_val = src_reg->u32_min_value;
 
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
+               return;
+       }
+
        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
         */
@@ -6500,9 +6538,16 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
 {
+       bool src_known = tnum_subreg_is_const(src_reg->var_off);
+       bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
 
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
+               return;
+       }
+
        /* We get both minimum and maximum from the var32_off. */
        dst_reg->u32_min_value = var32_off.value;
        dst_reg->u32_max_value = var32_off.value | var32_off.mask;
@@ -11484,7 +11529,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
                        struct bpf_insn insn_buf[16];
                        struct bpf_insn *patch = &insn_buf[0];
-                       bool issrc, isneg;
+                       bool issrc, isneg, isimm;
                        u32 off_reg;
 
                        aux = &env->insn_aux_data[i + delta];
@@ -11495,28 +11540,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
                        issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
                                BPF_ALU_SANITIZE_SRC;
+                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
 
                        off_reg = issrc ? insn->src_reg : insn->dst_reg;
-                       if (isneg)
-                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
-                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
-                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
-                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
-                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
-                       if (issrc) {
-                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
-                                                        off_reg);
-                               insn->src_reg = BPF_REG_AX;
+                       if (isimm) {
+                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
                        } else {
-                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
-                                                        BPF_REG_AX);
+                               if (isneg)
+                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
                        }
+                       if (!issrc)
+                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
+                       insn->src_reg = BPF_REG_AX;
                        if (isneg)
                                insn->code = insn->code == code_add ?
                                             code_sub : code_add;
                        *patch++ = *insn;
-                       if (issrc && isneg)
+                       if (issrc && isneg && !isimm)
                                *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
                        cnt = patch - insn_buf;
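
For the register (non-immediate) case, the patched sequence above clamps the offset so it survives only when it lies within [0, alu_limit]; a known-constant offset (BPF_ALU_IMMEDIATE) can skip the whole sequence and just load aux->alu_limit into AX. The user-space sketch below mirrors that arithmetic with assumed values (and assumes arithmetic right shift for the ARSH step):

#include <stdint.h>
#include <stdio.h>

/* Computes what the emitted BPF sequence leaves in the offset operand.
 * Negative offsets are first negated by the MUL -1 instruction and the
 * ADD/SUB opcode is flipped, so 'off' is treated as a magnitude here.
 */
static uint64_t mask_off(uint64_t alu_limit, uint64_t off)
{
	uint64_t ax;

	ax  = alu_limit;          /* MOV32_IMM  AX, alu_limit */
	ax -= off;                /* ALU64 SUB  AX, off       */
	ax |= off;                /* ALU64 OR   AX, off       */
	ax  = -ax;                /* ALU64 NEG  AX            */
	ax  = (int64_t)ax >> 63;  /* ALU64 ARSH AX, 63        */
	return ax & off;          /* ALU64 AND  AX, off       */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)mask_off(32, 16)); /* 16: in range */
	printf("%llu\n", (unsigned long long)mask_off(32, 48)); /*  0: clamped  */
	return 0;
}
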