swreg dst = reg_both(meta->insn.dst_reg * 2);
switch (meta->insn.off) {
- case offsetof(struct sk_buff, len):
- if (size != FIELD_SIZEOF(struct sk_buff, len))
+ case offsetof(struct __sk_buff, len):
+ if (size != FIELD_SIZEOF(struct __sk_buff, len))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
break;
- case offsetof(struct sk_buff, data):
- if (size != sizeof(void *))
+ case offsetof(struct __sk_buff, data):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
- case offsetof(struct sk_buff, cb) +
- offsetof(struct bpf_skb_data_end, data_end):
- if (size != sizeof(void *))
+ case offsetof(struct __sk_buff, data_end):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
{
swreg dst = reg_both(meta->insn.dst_reg * 2);
- if (size != sizeof(void *))
- return -EINVAL;
-
switch (meta->insn.off) {
- case offsetof(struct xdp_buff, data):
+ case offsetof(struct xdp_md, data):
+ if (size != FIELD_SIZEOF(struct xdp_md, data))
+ return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
- case offsetof(struct xdp_buff, data_end):
+ case offsetof(struct xdp_md, data_end):
+ if (size != FIELD_SIZEOF(struct xdp_md, data_end))
+ return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
break;
return 0;
}
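The two hunks above re-key the context-load switches to the UAPI structs (struct __sk_buff, struct xdp_md) instead of kernel-internal layouts, and size-check against the UAPI field width via FIELD_SIZEOF() (which expands to sizeof(((type *)0)->member)). Since data and data_end are __u32 in the UAPI structs, a 4-byte load is what must be accepted, not sizeof(void *). A minimal sketch, not part of the patch, of a program whose context accesses exercise the skb cases (the xdp_md cases are analogous):

	#include <linux/bpf.h>

	int prog(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;     /* 4-byte ctx load */
		void *data_end = (void *)(long)skb->data_end; /* 4-byte ctx load */

		if (data + 14 > data_end)	/* bounds check the verifier demands */
			return 0;
		return skb->len;		/* the __sk_buff len case */
	}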
-/**
- * nfp_bpf_jit() - translate BPF code into NFP assembly
- * @nfp_prog: nfp_prog prepared based on @filter
- * @filter: kernel BPF filter struct
- */
-int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter)
+int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
- ret = nfp_prog_verify(nfp_prog, filter);
- if (ret)
- return ret;
-
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
return ret;
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.xdp_offload = nfp_bpf_xdp_offload,
+
+ .bpf_verifier_prep = nfp_bpf_verifier_prep,
+ .bpf_translate = nfp_bpf_translate,
+ .bpf_destroy = nfp_bpf_destroy,
};
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
+ * @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @start_off: address of the first instruction in the memory
* @tgt_out: jump target for normal exit
unsigned int prog_len;
unsigned int __prog_alloc_len;
+ struct nfp_insn_meta *verifier_meta;
+
enum bpf_prog_type type;
unsigned int start_off;
struct list_head insns;
};
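@verifier_meta caches the list position between per-instruction callbacks because the hook only receives an instruction index; nfp_bpf_goto_meta() (kept in context further down) turns that index back into a meta node. A simplified sketch of the lookup, assuming the meta fields n (instruction index) and l (list node) used elsewhere in this driver, and ignoring any head/tail shortcuts the real helper takes:

	static struct nfp_insn_meta *
	goto_meta_sketch(struct nfp_insn_meta *meta, unsigned int insn_idx)
	{
		while (meta->n < insn_idx)		/* walk forward... */
			meta = list_next_entry(meta, l);
		while (meta->n > insn_idx)		/* ...or backward */
			meta = list_prev_entry(meta, l);
		return meta;
	}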
-int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter);
+int nfp_bpf_jit(struct nfp_prog *prog);
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+struct netdev_bpf;
+struct nfp_app;
struct nfp_net;
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog);
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
#endif
kfree(nfp_prog);
}
-static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog)
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
+ struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
int ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
- return NULL;
+ return -ENOMEM;
+ prog->aux->offload->dev_priv = nfp_prog;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
if (ret)
goto err_free;
- return nfp_prog;
+ nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
+ bpf->verifier.ops = &nfp_bpf_analyzer_ops;
+
+ return 0;
err_free:
nfp_prog_free(nfp_prog);
- return NULL;
+ return ret;
}
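For reference, the part of struct netdev_bpf this callback consumes and fills, sketched from this series (other commands' union members elided): the core passes the program in bpf->verifier.prog and the driver publishes its per-instruction hooks through bpf->verifier.ops:

	struct netdev_bpf {
		enum bpf_netdev_command command;
		union {
			struct {	/* BPF_OFFLOAD_VERIFIER_PREP */
				struct bpf_prog *prog;
				const struct bpf_ext_analyzer_ops *ops;	/* driver-filled */
			} verifier;
			struct {	/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
				struct bpf_prog *prog;
			} offload;
			/* XDP_SETUP_PROG etc. elided */
		};
	};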
-static int
-nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog,
- struct bpf_prog *prog)
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog, prog);
+ return nfp_bpf_jit(nfp_prog);
}
-static void nfp_bpf_destroy(struct nfp_prog *nfp_prog)
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+
kfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
+
+ return 0;
}
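Taken together, the three entry points give the driver a per-program lifecycle hung off prog->aux->offload->dev_priv; roughly, in the order the BPF core drives them (all names from this patch):

	/* nfp_bpf_verifier_prep() - alloc nfp_prog, publish analyzer ops
	 * nfp_verify_insn()       - per-insn checks while the kernel verifies
	 * nfp_bpf_translate()     - allocate and JIT the NFP instruction image
	 * nfp_net_bpf_load()      - DMA the image to the device on attach
	 * nfp_bpf_destroy()       - free image and nfp_prog with the program
	 */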
-static struct nfp_prog *
-nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
- dma_addr_t *dma_addr)
+static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
- struct nfp_prog *nfp_prog;
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
+ dma_addr_t dma_addr;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
- return NULL;
+ return -EOPNOTSUPP;
}
- nfp_prog = nfp_bpf_verifier_prep(prog);
- if (!nfp_prog)
- return NULL;
-
- err = nfp_bpf_translate(nn, nfp_prog, prog);
- if (err)
- goto err_destroy_prog;
-
- *dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
- nfp_prog->prog_len * sizeof(u64),
- DMA_TO_DEVICE);
- if (dma_mapping_error(nn->dp.dev, *dma_addr))
- goto err_destroy_prog;
-
- return nfp_prog;
-
-err_destroy_prog:
- nfp_bpf_destroy(nfp_prog);
- return NULL;
-}
-
-static void
-nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog,
- dma_addr_t dma_addr)
-{
- int err;
+ dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ nfp_prog->prog_len * sizeof(u64),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(nn->dp.dev, dma_addr))
+ return -ENOMEM;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
- nfp_bpf_destroy(nfp_prog);
+
+ return err;
}
static void nfp_net_bpf_start(struct nfp_net *nn)
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog)
{
- struct nfp_prog *nfp_prog;
- dma_addr_t dma_addr;
+ int err;
+
+ if (prog && !prog->aux->offload)
+ return -EINVAL;
if (prog && old_prog) {
u8 cap;
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr);
- if (!nfp_prog)
- return -EINVAL;
+ err = nfp_net_bpf_load(nn, prog);
+ if (err)
+ return err;
- nfp_net_bpf_load(nn, nfp_prog, dma_addr);
if (!old_prog)
nfp_net_bpf_start(nn);
#include "main.h"
-/* Analyzer/verifier definitions */
-struct nfp_bpf_analyzer_priv {
- struct nfp_prog *prog;
- struct nfp_insn_meta *meta;
-};
-
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
- struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
- struct nfp_insn_meta *meta = priv->meta;
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
- meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
- priv->meta = meta;
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
+ nfp_prog->verifier_meta = meta;
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
}
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
- return nfp_bpf_check_exit(priv->prog, env);
+ return nfp_bpf_check_exit(nfp_prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
- return nfp_bpf_check_ptr(priv->prog, meta, env,
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
- return nfp_bpf_check_ptr(priv->prog, meta, env,
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
-static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
-
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
-{
- struct nfp_bpf_analyzer_priv *priv;
- int ret;
-
- nfp_prog->stack_depth = prog->aux->stack_depth;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->prog = nfp_prog;
- priv->meta = nfp_prog_first_meta(nfp_prog);
-
- ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
-
- kfree(priv);
-
- return ret;
-}
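With the separately allocated analyzer_priv gone, the verifier reaches driver state through the program itself, and the hook is wired up once verifier_prep has set bpf->verifier.ops. A simplified, assumed sketch of the core-side dispatch (the env->dev_ops name is taken from this series' core patches and may differ):

	static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
					  int insn_idx, int prev_insn_idx)
	{
		if (env->dev_ops && env->dev_ops->insn_hook)
			return env->dev_ops->insn_hook(env, insn_idx,
						       prev_insn_idx);
		return 0;
	}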
struct bpf_prog;
struct net_device;
+struct netdev_bpf;
struct pci_dev;
struct sk_buff;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
+ * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
+ * @bpf_translate: translate call for dev-specific BPF programs
+ * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
+ int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+ int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+ int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
return app->type->xdp_offload(app, nn, prog);
}
+static inline int
+nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
+{
+ if (!app || !app->type->bpf_verifier_prep)
+ return -EOPNOTSUPP;
+ return app->type->bpf_verifier_prep(app, nn, bpf);
+}
+
+static inline int
+nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_translate)
+ return -EOPNOTSUPP;
+ return app->type->bpf_translate(app, nn, prog);
+}
+
+static inline int
+nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_destroy)
+ return -EOPNOTSUPP;
+ return app->type->bpf_destroy(app, nn, prog);
+}
+
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_app_bpf_translate(nn->app, nn,
+ xdp->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_app_bpf_destroy(nn->app, nn,
+ xdp->offload.prog);
default:
return -EINVAL;
}
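The three new cases are reached from the generic BPF offload code rather than from XDP attach; a rough sketch of how a command lands here, assuming the program was bound to this netdev when it was loaded (helper name hypothetical):

	static int offload_ndo_sketch(struct bpf_prog *prog,
				      struct netdev_bpf *bpf)
	{
		struct net_device *netdev = prog->aux->offload->netdev;

		if (!netdev->netdev_ops->ndo_bpf)
			return -EOPNOTSUPP;
		return netdev->netdev_ops->ndo_bpf(netdev, bpf);
	}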