git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
selftests/bpf: add fp-leaking precise subprog result tests
author: Andrii Nakryiko <andrii@kernel.org>
Thu, 4 Apr 2024 21:45:36 +0000 (14:45 -0700)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 5 Apr 2024 01:31:08 +0000 (18:31 -0700)
Add selftests validating that BPF verifier handles precision marking
for SCALAR registers derived from r10 (fp) register correctly.

Given `r0 = (s8)r10;` syntax is not supported by older Clang compilers,
use the raw BPF instruction syntax to maximize compatibility.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20240404214536.3551295-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/include/linux/filter.h
tools/testing/selftests/bpf/progs/verifier_subprog_precision.c

index 736bdeccdfe44bd872a9ffc8a6c34024de01efbc..65aa8ce142e598c943147ca5e47133f1db017d81 100644 (file)
                .off   = 0,                                     \
                .imm   = IMM })
 
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_X,             \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Short form of mov based on type,  BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
 
 #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                     \
index 6f5d19665cf67d0478a6f978a3695d012b5f6df2..4a58e0398e72abf96df7b9a9309b5ad22f2eaa18 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
+#include <../../../tools/include/linux/filter.h>
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 
@@ -76,6 +77,94 @@ __naked int subprog_result_precise(void)
        );
 }
 
+__naked __noinline __used
+static unsigned long fp_leaking_subprog()
+{
+       asm volatile (
+               ".8byte %[r0_eq_r10_cast_s8];"
+               "exit;"
+               :: __imm_insn(r0_eq_r10_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_10, 8))
+       );
+}
+
+__naked __noinline __used
+static unsigned long sneaky_fp_leaking_subprog()
+{
+       asm volatile (
+               "r1 = r10;"
+               ".8byte %[r0_eq_r1_cast_s8];"
+               "exit;"
+               :: __imm_insn(r0_eq_r1_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8))
+       );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("6: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
+__msg("mark_precise: frame0: regs=r0 stack= before 10: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 9: (bf) r0 = (s8)r10")
+__msg("7: R0_w=scalar")
+__naked int fp_precise_subprog_result(void)
+{
+       asm volatile (
+               "call fp_leaking_subprog;"
+               /* use subprog's returned value (which is derived from r10=fp
+                * register), as index into vals array, forcing all of that to
+                * be known precisely
+                */
+               "r0 &= 3;"
+               "r0 *= 4;"
+               "r1 = %[vals];"
+               /* force precision marking */
+               "r1 += r0;"
+               "r0 = *(u32 *)(r1 + 0);"
+               "exit;"
+               :
+               : __imm_ptr(vals)
+               : __clobber_common
+       );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("6: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = (s8)r1")
+/* here r1 is marked precise, even though it's fp register, but that's fine
+ * because by the time we get out of subprogram it has to be derived from r10
+ * anyways, at which point we'll break precision chain
+ */
+__msg("mark_precise: frame1: regs=r1 stack= before 9: (bf) r1 = r10")
+__msg("7: R0_w=scalar")
+__naked int sneaky_fp_precise_subprog_result(void)
+{
+       asm volatile (
+               "call sneaky_fp_leaking_subprog;"
+               /* use subprog's returned value (which is derived from r10=fp
+                * register), as index into vals array, forcing all of that to
+                * be known precisely
+                */
+               "r0 &= 3;"
+               "r0 *= 4;"
+               "r1 = %[vals];"
+               /* force precision marking */
+               "r1 += r0;"
+               "r0 = *(u32 *)(r1 + 0);"
+               "exit;"
+               :
+               : __imm_ptr(vals)
+               : __clobber_common
+       );
+}
+
 SEC("?raw_tp")
 __success __log_level(2)
 __msg("9: (0f) r1 += r0")