bpf: Rename BPF_XADD and prepare to encode other atomics in .imm
author Brendan Jackman <jackmanb@google.com>
Thu, 14 Jan 2021 18:17:44 +0000 (18:17 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 15 Jan 2021 02:34:29 +0000 (18:34 -0800)
A subsequent patch will add additional atomic operations. These new
operations will use the same opcode field as the existing XADD, with
the immediate discriminating different operations.

In preparation, rename the instruction mode to BPF_ATOMIC and start
calling the zero immediate BPF_ADD.

This is possible (doesn't break existing valid BPF progs) because the
immediate field is currently reserved MBZ and BPF_ADD is zero.

All uses are removed from the tree but the BPF_XADD definition is
kept around to avoid breaking builds for people including kernel
headers.
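
For illustration, the old and new spellings encode byte-identical
instructions, since BPF_ADD is zero. A minimal sketch using the
BPF_ATOMIC_OP helper this patch adds to include/linux/filter.h
(registers and offset are arbitrary):

  /* Old: mode BPF_XADD, .imm reserved and must be zero */
  struct bpf_insn a = BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8);

  /* New: mode BPF_ATOMIC, .imm selects the operation (BPF_ADD == 0x00) */
  struct bpf_insn b = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8);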

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-5-jackmanb@google.com
35 files changed:
Documentation/networking/filter.rst
arch/arm/net/bpf_jit_32.c
arch/arm64/net/bpf_jit_comp.c
arch/mips/net/ebpf_jit.c
arch/powerpc/net/bpf_jit_comp64.c
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp_64.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
include/linux/filter.h
include/uapi/linux/bpf.h
kernel/bpf/core.c
kernel/bpf/disasm.c
kernel/bpf/verifier.c
lib/test_bpf.c
samples/bpf/bpf_insn.h
samples/bpf/cookie_uid_helper_example.c
samples/bpf/sock_example.c
samples/bpf/test_cgrp2_attach.c
tools/include/linux/filter.h
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
tools/testing/selftests/bpf/test_cgroup_storage.c
tools/testing/selftests/bpf/verifier/ctx.c
tools/testing/selftests/bpf/verifier/direct_packet_access.c
tools/testing/selftests/bpf/verifier/leak_ptr.c
tools/testing/selftests/bpf/verifier/meta_access.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_illegal_alu.c
tools/testing/selftests/bpf/verifier/xadd.c

index debb59e374debb7ba77ceb778042d7483f850be5..1583d59d806d865a080db2d1e16efa2d94a7fc2d 100644 (file)
@@ -1006,13 +1006,13 @@ Size modifier is one of ...
 
 Mode modifier is one of::
 
-  BPF_IMM  0x00  /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
-  BPF_ABS  0x20
-  BPF_IND  0x40
-  BPF_MEM  0x60
-  BPF_LEN  0x80  /* classic BPF only, reserved in eBPF */
-  BPF_MSH  0xa0  /* classic BPF only, reserved in eBPF */
-  BPF_XADD 0xc0  /* eBPF only, exclusive add */
+  BPF_IMM     0x00  /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
+  BPF_ABS     0x20
+  BPF_IND     0x40
+  BPF_MEM     0x60
+  BPF_LEN     0x80  /* classic BPF only, reserved in eBPF */
+  BPF_MSH     0xa0  /* classic BPF only, reserved in eBPF */
+  BPF_ATOMIC  0xc0  /* eBPF only, atomic operations */
 
 eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
 (BPF_IND | <size> | BPF_LD) which are used to access packet data.
@@ -1044,11 +1044,19 @@ Unlike classic BPF instruction set, eBPF has generic load/store operations::
     BPF_MEM | <size> | BPF_STX:  *(size *) (dst_reg + off) = src_reg
     BPF_MEM | <size> | BPF_ST:   *(size *) (dst_reg + off) = imm32
     BPF_MEM | <size> | BPF_LDX:  dst_reg = *(size *) (src_reg + off)
-    BPF_XADD | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-    BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
 
-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
+Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
+
+It also includes atomic operations, which use the immediate field for extra
+encoding::
+
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+
+Note that 1 and 2 byte atomic operations are not supported.
+
+You may encounter BPF_XADD - this is a legacy name for BPF_ATOMIC, referring to
+the exclusive-add operation encoded when the immediate field is zero.
 
 eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists
 of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
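
Each JIT touched below gates on the immediate in the same way; a sketch
of the dispatch pattern (emit_atomic_add() is a hypothetical stand-in
for each backend's real emission code):

    case BPF_STX | BPF_ATOMIC | BPF_W:
    case BPF_STX | BPF_ATOMIC | BPF_DW:
            /* BPF_ATOMIC only marks the mode; insn->imm names the op.
             * Only BPF_ADD (the old XADD) is defined at this point.
             */
            if (insn->imm != BPF_ADD)
                    return -EINVAL;
            emit_atomic_add(ctx, insn); /* hypothetical helper */
            break;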
index 0207b6ea6e8a0b3833f6b5376cc7c1fe75740358..897634d0a67ca34b338399d93fb1d51f86c5e58a 100644 (file)
@@ -1620,10 +1620,9 @@ exit:
                }
                emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
                break;
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       /* Atomic ops */
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
                goto notyet;
        /* STX: *(size *)(dst + off) = src */
        case BPF_STX | BPF_MEM | BPF_W:
index ef9f1d5e989d062a589e286d3dac30693d1e43c0..f7b194878a99a0931ef43250ad5bb1b04498517b 100644 (file)
@@ -875,10 +875,18 @@ emit_cond_jmp:
                }
                break;
 
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op code %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
+               /* STX XADD: lock *(u32 *)(dst + off) += src
+                * and
+                * STX XADD: lock *(u64 *)(dst + off) += src
+                */
+
                if (!off) {
                        reg = dst;
                } else {
index 561154cbcc401eb8e5eee381af4873f638583fc1..939dd06764bc9ff91f8540af452026f7327de8cc 100644 (file)
@@ -1423,8 +1423,8 @@ jeq_common:
        case BPF_STX | BPF_H | BPF_MEM:
        case BPF_STX | BPF_W | BPF_MEM:
        case BPF_STX | BPF_DW | BPF_MEM:
-       case BPF_STX | BPF_W | BPF_XADD:
-       case BPF_STX | BPF_DW | BPF_XADD:
+       case BPF_STX | BPF_W | BPF_ATOMIC:
+       case BPF_STX | BPF_DW | BPF_ATOMIC:
                if (insn->dst_reg == BPF_REG_10) {
                        ctx->flags |= EBPF_SEEN_FP;
                        dst = MIPS_R_SP;
@@ -1438,7 +1438,12 @@ jeq_common:
                src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
                if (src < 0)
                        return src;
-               if (BPF_MODE(insn->code) == BPF_XADD) {
+               if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+                       if (insn->imm != BPF_ADD) {
+                               pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+                               return -EINVAL;
+                       }
+
                        /*
                         * If mem_off does not fit within the 9 bit ll/sc
                         * instruction immediate field, use a temp reg.
index 022103c6a201aa5386c112d9fa9bfbdcb1cc6b1e..aaf1a887f653b81f6a6641aa3f5906ac1458daf6 100644 (file)
@@ -683,10 +683,18 @@ emit_clear:
                        break;
 
                /*
-                * BPF_STX XADD (atomic_add)
+                * BPF_STX ATOMIC (atomic ops)
                 */
-               /* *(u32 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+                       if (insn->imm != BPF_ADD) {
+                               pr_err_ratelimited(
+                                       "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                       code, i);
+                               return -ENOTSUPP;
+                       }
+
+                       /* *(u32 *)(dst + off) += src */
+
                        /* Get EA into TMP_REG_1 */
                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
                        tmp_idx = ctx->idx * 4;
@@ -699,8 +707,15 @@ emit_clear:
                        /* we're done if this succeeded */
                        PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
-               /* *(u64 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_DW:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
+                       if (insn->imm != BPF_ADD) {
+                               pr_err_ratelimited(
+                                       "eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                       code, i);
+                               return -ENOTSUPP;
+                       }
+                       /* *(u64 *)(dst + off) += src */
+
                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
                        tmp_idx = ctx->idx * 4;
                        EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
index 579575f9cdae0873057ceb878fc6e9dce46e21c8..81de865f4c7c3592e6d730b2d3f723e9cacf09e7 100644 (file)
@@ -881,7 +881,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
        const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
        const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
 
-       if (mode == BPF_XADD && size != BPF_W)
+       if (mode == BPF_ATOMIC && size != BPF_W)
                return -1;
 
        emit_imm(RV_REG_T0, off, ctx);
@@ -899,7 +899,7 @@ static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
                case BPF_MEM:
                        emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
                        break;
-               case BPF_XADD:
+               case BPF_ATOMIC: /* Only BPF_ADD supported */
                        emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
                             ctx);
                        break;
@@ -1260,7 +1260,6 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_STX | BPF_MEM | BPF_H:
        case BPF_STX | BPF_MEM | BPF_W:
        case BPF_STX | BPF_MEM | BPF_DW:
-       case BPF_STX | BPF_XADD | BPF_W:
                if (BPF_CLASS(code) == BPF_ST) {
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
@@ -1271,8 +1270,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        return -1;
                break;
 
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+               if (insn->imm != BPF_ADD) {
+                       pr_info_once(
+                               "bpf-jit: not supported: atomic operation %02x ***\n",
+                               insn->imm);
+                       return -EFAULT;
+               }
+
+               if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+                                  BPF_MODE(code)))
+                       return -1;
+               break;
+
        /* No hardware support for 8-byte atomics in RV32. */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
                /* Fallthrough. */
 
 notsupported:
index 8a56b52931170ac0ebc78872c8352837bf998278..b44ff52f84a620cdfd423e790b04b2de572eeb5e 100644 (file)
@@ -1027,10 +1027,18 @@ out_be:
                emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
                emit_sd(RV_REG_T1, 0, rs, ctx);
                break;
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W:
-       /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (insn->imm != BPF_ADD) {
+                       pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
+                              insn->imm);
+                       return -EINVAL;
+               }
+
+               /* atomic_add: lock *(u32 *)(dst + off) += src
+                * atomic_add: lock *(u64 *)(dst + off) += src
+                */
+
                if (off) {
                        if (is_12b_int(off)) {
                                emit_addi(RV_REG_T1, rd, off, ctx);
index 0a418279287691b5ae4553134859b19ae06bc37b..f973e2ead1973af2c74a4263f7d71411d645b1f6 100644 (file)
@@ -1205,18 +1205,23 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                jit->seen |= SEEN_MEM;
                break;
        /*
-        * BPF_STX XADD (atomic_add)
+        * BPF_ATOMIC
         */
-       case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
-               /* laal %w0,%src,off(%dst) */
-               EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
-                             dst_reg, off);
-               jit->seen |= SEEN_MEM;
-               break;
-       case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
-               /* laalg %w0,%src,off(%dst) */
-               EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
-                             dst_reg, off);
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+               if (insn->imm != BPF_ADD) {
+                       pr_err("Unknown atomic operation %02x\n", insn->imm);
+                       return -1;
+               }
+
+               /* *(u32/u64 *)(dst + off) += src
+                *
+                * BPF_W:  laal  %w0,%src,off(%dst)
+                * BPF_DW: laalg %w0,%src,off(%dst)
+                */
+               EMIT6_DISP_LH(0xeb000000,
+                             BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
+                             REG_W0, src_reg, dst_reg, off);
                jit->seen |= SEEN_MEM;
                break;
        /*
index 3364e2a009899c39860e902627b62ee2d07ad2b9..4b8d3c65d2666e3858432ced5d4ce8b72f78c771 100644 (file)
@@ -1366,12 +1366,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                break;
        }
 
-       /* STX XADD: lock *(u32 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_W: {
+       case BPF_STX | BPF_ATOMIC | BPF_W: {
                const u8 tmp = bpf2sparc[TMP_REG_1];
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
+               /* lock *(u32 *)(dst + off) += src */
+
                if (insn->dst_reg == BPF_REG_FP)
                        ctx->saw_frame_pointer = true;
 
@@ -1390,11 +1396,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                break;
        }
        /* STX XADD: lock *(u64 *)(dst + off) += src */
-       case BPF_STX | BPF_XADD | BPF_DW: {
+       case BPF_STX | BPF_ATOMIC | BPF_DW: {
                const u8 tmp = bpf2sparc[TMP_REG_1];
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->imm != BPF_ADD) {
+                       pr_err_once("unknown atomic op %02x\n", insn->imm);
+                       return -EINVAL;
+               }
+
                if (insn->dst_reg == BPF_REG_FP)
                        ctx->saw_frame_pointer = true;
 
index 93f32e0ba0ef99dea58671e86510647a0e2f4765..b1829a534da14e03fca39858335b4511b94bad20 100644 (file)
@@ -795,6 +795,33 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
        *pprog = prog;
 }
 
+static int emit_atomic(u8 **pprog, u8 atomic_op,
+                      u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+{
+       u8 *prog = *pprog;
+       int cnt = 0;
+
+       EMIT1(0xF0); /* lock prefix */
+
+       maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
+
+       /* emit opcode */
+       switch (atomic_op) {
+       case BPF_ADD:
+               /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
+               EMIT1(simple_alu_opcodes[atomic_op]);
+               break;
+       default:
+               pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+               return -EFAULT;
+       }
+
+       emit_insn_suffix(&prog, dst_reg, src_reg, off);
+
+       *pprog = prog;
+       return 0;
+}
+
 static bool ex_handler_bpf(const struct exception_table_entry *x,
                           struct pt_regs *regs, int trapnr,
                           unsigned long error_code, unsigned long fault_addr)
@@ -839,6 +866,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
        int i, cnt = 0, excnt = 0;
        int proglen = 0;
        u8 *prog = temp;
+       int err;
 
        detect_reg_usage(insn, insn_cnt, callee_regs_used,
                         &tail_call_seen);
@@ -1250,18 +1278,12 @@ st:                     if (is_imm8(insn->off))
                        }
                        break;
 
-                       /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
-               case BPF_STX | BPF_XADD | BPF_W:
-                       /* Emit 'lock add dword ptr [rax + off], eax' */
-                       if (is_ereg(dst_reg) || is_ereg(src_reg))
-                               EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
-                       else
-                               EMIT2(0xF0, 0x01);
-                       goto xadd;
-               case BPF_STX | BPF_XADD | BPF_DW:
-                       EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd:
-                       emit_modrm_dstoff(&prog, dst_reg, src_reg, insn->off);
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
+                       err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+                                         insn->off, BPF_SIZE(insn->code));
+                       if (err)
+                               return err;
                        break;
 
                        /* call */
index 96fde03aa9877a68f978cbef19b35a222baef633..d17b67c69f89ae7a4104942bdccf559031175b1e 100644 (file)
@@ -2243,10 +2243,8 @@ emit_jmp:
                                return -EFAULT;
                        }
                        break;
-               /* STX XADD: lock *(u32 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_W:
-               /* STX XADD: lock *(u64 *)(dst + off) += src */
-               case BPF_STX | BPF_XADD | BPF_DW:
+               case BPF_STX | BPF_ATOMIC | BPF_W:
+               case BPF_STX | BPF_ATOMIC | BPF_DW:
                        goto notyet;
                case BPF_JMP | BPF_EXIT:
                        if (seen_exit) {
index 0a721f6e8676e22f73fa8ade0345d765c3d30d82..e31f8fbbc696d71bd1fb745e975b2402e3a96d6f 100644 (file)
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
        return 0;
 }
 
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+       if (meta->insn.imm != BPF_ADD)
+               return -EOPNOTSUPP;
+
        return mem_xadd(nfp_prog, meta, false);
 }
 
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+       if (meta->insn.imm != BPF_ADD)
+               return -EOPNOTSUPP;
+
        return mem_xadd(nfp_prog, meta, true);
 }
 
@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
        [BPF_STX | BPF_MEM | BPF_H] =   mem_stx2,
        [BPF_STX | BPF_MEM | BPF_W] =   mem_stx4,
        [BPF_STX | BPF_MEM | BPF_DW] =  mem_stx8,
-       [BPF_STX | BPF_XADD | BPF_W] =  mem_xadd4,
-       [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+       [BPF_STX | BPF_ATOMIC | BPF_W] =        mem_atomic4,
+       [BPF_STX | BPF_ATOMIC | BPF_DW] =       mem_atomic8,
        [BPF_ST | BPF_MEM | BPF_B] =    mem_st1,
        [BPF_ST | BPF_MEM | BPF_H] =    mem_st2,
        [BPF_ST | BPF_MEM | BPF_W] =    mem_st4,
index fac9c6f9e197b44882309b08522187ee0f5eba74..d0e17eebddd949bd8e07609714ad69ac876ac0ad 100644 (file)
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
        return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
 }
 
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
 {
-       return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+       return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
 }
 
 static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
index e92ee510fd52a867ce0c8280503b142371b9830f..9d235c0ce46a8149370131d4e7b403ad3224b955 100644 (file)
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
-               if (is_mbpf_xadd(meta)) {
+               if (is_mbpf_atomic(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
@@ -523,12 +523,17 @@ exit_check_ptr:
 }
 
 static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-                  struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+                    struct bpf_verifier_env *env)
 {
        const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
 
+       if (meta->insn.imm != BPF_ADD) {
+               pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+               return -EOPNOTSUPP;
+       }
+
        if (dreg->type != PTR_TO_MAP_VALUE) {
                pr_vlog(env, "atomic add not to a map value pointer: %d\n",
                        dreg->type);
@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
        if (is_mbpf_store(meta))
                return nfp_bpf_check_store(nfp_prog, meta, env);
 
-       if (is_mbpf_xadd(meta))
-               return nfp_bpf_check_xadd(nfp_prog, meta, env);
+       if (is_mbpf_atomic(meta))
+               return nfp_bpf_check_atomic(nfp_prog, meta, env);
 
        if (is_mbpf_alu(meta))
                return nfp_bpf_check_alu(nfp_prog, meta, env);
index 5edf2b66088128cfc36594f0fb64ec539b5e7efb..392e94b7966877d9215e6d950f15daffbca58f76 100644 (file)
@@ -259,15 +259,23 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = OFF,                                   \
                .imm   = 0 })
 
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
 
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                 \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
index a1ad32456f89aa90c50a16b660ab62831f0dc00c..6b3996343e63746c10bedd9645d3d4a77d49d52a 100644 (file)
@@ -19,7 +19,8 @@
 
 /* ld/ldx fields */
 #define BPF_DW         0x18    /* double word (64-bit) */
-#define BPF_XADD       0xc0    /* exclusive add */
+#define BPF_ATOMIC     0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD       0xc0    /* exclusive add - legacy name */
 
 /* alu/jmp fields */
 #define BPF_MOV                0xb0    /* mov reg to reg */
@@ -2448,7 +2449,7 @@ union bpf_attr {
  *             running simultaneously.
  *
  *             A user should care about the synchronization by himself.
- *             For example, by using the **BPF_STX_XADD** instruction to alter
+ *             For example, by using the **BPF_ATOMIC** instructions to alter
  *             the shared data.
  *     Return
  *             A pointer to the local storage area.
index 69c3c308de5e055163db728dab8daf3b9d911fcd..4836ebf459cfe81665d56eaf48041df1eb1bccdf 100644 (file)
@@ -1309,8 +1309,8 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
        INSN_3(STX, MEM,  H),                   \
        INSN_3(STX, MEM,  W),                   \
        INSN_3(STX, MEM,  DW),                  \
-       INSN_3(STX, XADD, W),                   \
-       INSN_3(STX, XADD, DW),                  \
+       INSN_3(STX, ATOMIC, W),                 \
+       INSN_3(STX, ATOMIC, DW),                \
        /*   Immediate based. */                \
        INSN_3(ST, MEM, B),                     \
        INSN_3(ST, MEM, H),                     \
@@ -1618,13 +1618,27 @@ out:
        LDX_PROBE(DW, 8)
 #undef LDX_PROBE
 
-       STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-               atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-                          (DST + insn->off));
+       STX_ATOMIC_W:
+               switch (IMM) {
+               case BPF_ADD:
+                       /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+                       atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+                                  (DST + insn->off));
+                       break;
+               default:
+                       goto default_label;
+               }
                CONT;
-       STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-               atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-                            (DST + insn->off));
+       STX_ATOMIC_DW:
+               switch (IMM) {
+               case BPF_ADD:
+                       /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+                       atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+                                    (DST + insn->off));
+                       break;
+               default:
+                       goto default_label;
+               }
                CONT;
 
        default_label:
@@ -1634,7 +1648,8 @@ out:
                 *
                 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
                 */
-               pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+               pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
+                       insn->code, insn->imm);
                BUG_ON(1);
                return 0;
 }
index b44d8c447afd1d699b27e152e5bf06a24dff80f0..37c8d6e9b4cce8e2bd0cc2d878872f5a14f7e4fe 100644 (file)
@@ -153,14 +153,16 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg,
                                insn->off, insn->src_reg);
-               else if (BPF_MODE(insn->code) == BPF_XADD)
+               else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+                        insn->imm == BPF_ADD) {
                        verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
                                insn->code,
                                bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
                                insn->dst_reg, insn->off,
                                insn->src_reg);
-               else
+               } else {
                        verbose(cbs->private_data, "BUG_%02x\n", insn->code);
+               }
        } else if (class == BPF_ST) {
                if (BPF_MODE(insn->code) != BPF_MEM) {
                        verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
index ae2aee48cf821d5dd74262e51d8f5125e1d253cb..cfc137b81ac6661adaa492d982f80eb146fda73a 100644 (file)
@@ -3604,13 +3604,17 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        return err;
 }
 
-static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
+static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
        int err;
 
-       if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
-           insn->imm != 0) {
-               verbose(env, "BPF_XADD uses reserved fields\n");
+       if (insn->imm != BPF_ADD) {
+               verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
+               return -EINVAL;
+       }
+
+       if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
+               verbose(env, "invalid atomic operand size\n");
                return -EINVAL;
        }
 
@@ -3633,19 +3637,19 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
            is_pkt_reg(env, insn->dst_reg) ||
            is_flow_key_reg(env, insn->dst_reg) ||
            is_sk_reg(env, insn->dst_reg)) {
-               verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+               verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
                        insn->dst_reg,
                        reg_type_str[reg_state(env, insn->dst_reg)->type]);
                return -EACCES;
        }
 
-       /* check whether atomic_add can read the memory */
+       /* check whether we can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1, true);
        if (err)
                return err;
 
-       /* check whether atomic_add can write into the same memory */
+       /* check whether we can write into the same memory */
        return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
@@ -9524,8 +9528,8 @@ static int do_check(struct bpf_verifier_env *env)
                } else if (class == BPF_STX) {
                        enum bpf_reg_type *prev_dst_type, dst_reg_type;
 
-                       if (BPF_MODE(insn->code) == BPF_XADD) {
-                               err = check_xadd(env, env->insn_idx, insn);
+                       if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+                               err = check_atomic(env, env->insn_idx, insn);
                                if (err)
                                        return err;
                                env->insn_idx++;
@@ -10010,7 +10014,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 
                if (BPF_CLASS(insn->code) == BPF_STX &&
                    ((BPF_MODE(insn->code) != BPF_MEM &&
-                     BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+                     BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
                        verbose(env, "BPF_STX uses reserved fields\n");
                        return -EINVAL;
                }
index ca7d635bccd9dbfc0ac4d6cd43433b4261f4d40d..49ec9e8d8aed6c81b0f1413cb95a9128dc25486b 100644 (file)
@@ -4295,13 +4295,13 @@ static struct bpf_test tests[] = {
                { { 0, 0xffffffff } },
                .stack_depth = 40,
        },
-       /* BPF_STX | BPF_XADD | BPF_W/DW */
+       /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
        {
                "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_LDX_MEM(BPF_W, R0, R10, -40),
                        BPF_EXIT_INSN(),
                },
@@ -4316,7 +4316,7 @@ static struct bpf_test tests[] = {
                        BPF_ALU64_REG(BPF_MOV, R1, R10),
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_ALU64_REG(BPF_MOV, R0, R10),
                        BPF_ALU64_REG(BPF_SUB, R0, R1),
                        BPF_EXIT_INSN(),
@@ -4331,7 +4331,7 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -4352,7 +4352,7 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_LDX_MEM(BPF_DW, R0, R10, -40),
                        BPF_EXIT_INSN(),
                },
@@ -4367,7 +4367,7 @@ static struct bpf_test tests[] = {
                        BPF_ALU64_REG(BPF_MOV, R1, R10),
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_ALU64_REG(BPF_MOV, R0, R10),
                        BPF_ALU64_REG(BPF_SUB, R0, R1),
                        BPF_EXIT_INSN(),
@@ -4382,7 +4382,7 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
                        BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
index 544237980582b909fe251fd68c7ee6b8eb946f5a..db67a2847395f74d66ddf3c989b43dc25ae38178 100644 (file)
@@ -138,11 +138,14 @@ struct bpf_insn;
 
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                 \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
index deb0e3e0324d4810f6ad0b2764d4ee4fe4b86896..c5ff7a13918c98cd6955f9d41b38222b450da899 100644 (file)
@@ -147,12 +147,12 @@ static void prog_load(void)
                 */
                BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
                BPF_MOV64_IMM(BPF_REG_1, 1),
-               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                               offsetof(struct stats, packets)),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                             offsetof(struct stats, packets)),
                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
                                offsetof(struct __sk_buff, len)),
-               BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-                               offsetof(struct stats, bytes)),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+                             offsetof(struct stats, bytes)),
                BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
                                offsetof(struct __sk_buff, len)),
                BPF_EXIT_INSN(),
index 00aae1d33fcad09d17a313e87ef02d2216b94f1d..23d1930e19270d7a90d47d66e25b37ef4c4bfebd 100644 (file)
@@ -54,7 +54,7 @@ static int test_sock(void)
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
                BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
                BPF_EXIT_INSN(),
        };
index 20fbd1241db335ff929fd80044da39de71d5ea2f..390ff38d2ac67227e3364b028e767f76036a39ef 100644 (file)
@@ -53,7 +53,7 @@ static int prog_load(int map_fd, int verdict)
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                /* Count bytes */
                BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
@@ -64,7 +64,8 @@ static int prog_load(int map_fd, int verdict)
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
                BPF_EXIT_INSN(),
index ca28b6ab8db7c7d315d2e128331d7223c2113825..e870c9039f0d5bb228c1d352e0d83553b08ce0ba 100644 (file)
                .off   = OFF,                                   \
                .imm   = 0 })
 
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
+ */
 
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                 \
        ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
                .off   = OFF,                                   \
-               .imm   = 0 })
+               .imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
 
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
index a1ad32456f89aa90c50a16b660ab62831f0dc00c..6b3996343e63746c10bedd9645d3d4a77d49d52a 100644 (file)
@@ -19,7 +19,8 @@
 
 /* ld/ldx fields */
 #define BPF_DW         0x18    /* double word (64-bit) */
-#define BPF_XADD       0xc0    /* exclusive add */
+#define BPF_ATOMIC     0xc0    /* atomic memory ops - op type in immediate */
+#define BPF_XADD       0xc0    /* exclusive add - legacy name */
 
 /* alu/jmp fields */
 #define BPF_MOV                0xb0    /* mov reg to reg */
@@ -2448,7 +2449,7 @@ union bpf_attr {
  *             running simultaneously.
  *
  *             A user should care about the synchronization by himself.
- *             For example, by using the **BPF_STX_XADD** instruction to alter
+ *             For example, by using the **BPF_ATOMIC** instructions to alter
  *             the shared data.
  *     Return
  *             A pointer to the local storage area.
index b549fcfacc0bd3b943847e8835719b4fa3f34fd9..0a1fc9816cef307635f696861e43e4c68a6ca73d 100644 (file)
@@ -45,13 +45,13 @@ static int prog_load_cnt(int verdict, int val)
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
                BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
                BPF_MOV64_IMM(BPF_REG_2, 0),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
                BPF_MOV64_IMM(BPF_REG_1, val),
-               BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 
                BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
                BPF_MOV64_IMM(BPF_REG_2, 0),
index d946252a25bbc3978378b890d37e3535edb5a2c0..0cda61da5d395a1736fcbe3eed2f6e4e7deaf70a 100644 (file)
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_get_local_storage),
                BPF_MOV64_IMM(BPF_REG_1, 1),
-               BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
                BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
                BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
index 93d6b164148125cdfc9d2adfa2d516fcff6f6115..23080862aafd2b72e84f0f36c91970a135dfdf04 100644 (file)
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 {
-       "context stores via XADD",
+       "context stores via BPF_ATOMIC",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
-       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
-                    BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
        BPF_EXIT_INSN(),
        },
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
index ae72536603fe2bdf39f15725672e296b7a48547c..ac1e19d0f5200818e00bf7da085b2eca442d69ec 100644 (file)
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
        BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
        BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
        BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
index d6eec17f2cd2e0ef9bf2a7d28625657737dd02e1..73f0dea95546cddcf3ad82175d8179205632b075 100644 (file)
@@ -5,7 +5,7 @@
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, cb[0])),
        BPF_LD_MAP_FD(BPF_REG_2, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
                      offsetof(struct __sk_buff, cb[0])),
        BPF_EXIT_INSN(),
        },
@@ -13,7 +13,7 @@
        .errstr_unpriv = "R2 leaks addr into mem",
        .result_unpriv = REJECT,
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
        "leak pointer into ctx 2",
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
                    offsetof(struct __sk_buff, cb[0])),
-       BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
                      offsetof(struct __sk_buff, cb[0])),
        BPF_EXIT_INSN(),
        },
        .errstr_unpriv = "R10 leaks addr into mem",
        .result_unpriv = REJECT,
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+       .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
        "leak pointer into ctx 3",
@@ -56,7 +56,7 @@
        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
        BPF_MOV64_IMM(BPF_REG_3, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
index 205292b8dd65f3daf72e43cdfd71336db9d4f1cf..b45e8af414204bbbccf31916f2325480376e9d86 100644 (file)
        BPF_MOV64_IMM(BPF_REG_5, 42),
        BPF_MOV64_IMM(BPF_REG_6, 24),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
        BPF_MOV64_IMM(BPF_REG_5, 42),
        BPF_MOV64_IMM(BPF_REG_6, 24),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
        BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
        BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
        BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
index a3fe0fbaed41a7bab2d0b2f82e7285fee0416c6f..ee298627abaeec23f57ec69f7113201a32eec48a 100644 (file)
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
        BPF_MOV64_IMM(BPF_REG_0, 1),
-       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+       BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
+                    BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
        BPF_EXIT_INSN(),
index ed1c2cea1dea66c06083c7b41ce0280ce7dd7b54..489062867218335d075adde0740051aa5642f006 100644 (file)
@@ -82,7 +82,7 @@
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-       BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
        BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
        BPF_EXIT_INSN(),
index c5de2e62cc8bbc919e913da12b8de428ebfcd46a..b96ef35268150b5bca3b442282daddb10d222041 100644 (file)
@@ -3,7 +3,7 @@
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
        BPF_EXIT_INSN(),
        },
@@ -22,7 +22,7 @@
        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
        BPF_EXIT_INSN(),
        BPF_MOV64_IMM(BPF_REG_1, 1),
-       BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
        BPF_EXIT_INSN(),
        },
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
        BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
-       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
-       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
        BPF_EXIT_INSN(),
        },
        .result = REJECT,
-       .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+       .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
@@ -62,8 +62,8 @@
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
        BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
        BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
@@ -82,8 +82,8 @@
        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+       BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
        BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
        BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),