]>
git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
97a8f00674d01c46901f8c8bdb7e59207dd39422
2 * Copyright (C) 2016 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #define pr_fmt(fmt) "NFP net bpf: " fmt
36 #include <linux/kernel.h>
37 #include <linux/bpf.h>
38 #include <linux/filter.h>
39 #include <linux/pkt_cls.h>
40 #include <linux/unistd.h>
45 /* --- NFP prog --- */
46 /* Foreach "multiple" entries macros provide pos and next<n> pointers.
47 * It's safe to modify the next pointers (but not pos).
49 #define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
50 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
51 next = list_next_entry(pos, l); \
52 &(nfp_prog)->insns != &pos->l && \
53 &(nfp_prog)->insns != &next->l; \
54 pos = nfp_meta_next(pos), \
55 next = nfp_meta_next(pos))
57 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
58 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
59 next = list_next_entry(pos, l), \
60 next2 = list_next_entry(next, l); \
61 &(nfp_prog)->insns != &pos->l && \
62 &(nfp_prog)->insns != &next->l && \
63 &(nfp_prog)->insns != &next2->l; \
64 pos = nfp_meta_next(pos), \
65 next = nfp_meta_next(pos), \
66 next2 = nfp_meta_next(next))
69 nfp_meta_has_next(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
71 return meta
->l
.next
!= &nfp_prog
->insns
;
75 nfp_meta_has_prev(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
77 return meta
->l
.prev
!= &nfp_prog
->insns
;
80 static void nfp_prog_free(struct nfp_prog
*nfp_prog
)
82 struct nfp_insn_meta
*meta
, *tmp
;
84 list_for_each_entry_safe(meta
, tmp
, &nfp_prog
->insns
, l
) {
91 static void nfp_prog_push(struct nfp_prog
*nfp_prog
, u64 insn
)
93 if (nfp_prog
->__prog_alloc_len
== nfp_prog
->prog_len
) {
94 nfp_prog
->error
= -ENOSPC
;
98 nfp_prog
->prog
[nfp_prog
->prog_len
] = insn
;
102 static unsigned int nfp_prog_current_offset(struct nfp_prog
*nfp_prog
)
104 return nfp_prog
->start_off
+ nfp_prog
->prog_len
;
108 nfp_prog_offset_to_index(struct nfp_prog
*nfp_prog
, unsigned int offset
)
110 return offset
- nfp_prog
->start_off
;
114 struct nfp_insn_ur_regs
{
115 enum alu_dst_ab dst_ab
;
122 struct nfp_insn_re_regs
{
123 enum alu_dst_ab dst_ab
;
131 static u16
nfp_swreg_to_unreg(u32 swreg
, bool is_dst
)
133 u16 val
= FIELD_GET(NN_REG_VAL
, swreg
);
135 switch (FIELD_GET(NN_REG_TYPE
, swreg
)) {
138 case NN_REG_GPR_BOTH
:
141 return UR_REG_NN
| val
;
143 return UR_REG_XFR
| val
;
146 pr_err("immediate too large\n");
149 return UR_REG_IMM_encode(val
);
151 return is_dst
? UR_REG_NO_DST
: REG_NONE
;
153 pr_err("unrecognized reg encoding %08x\n", swreg
);
159 swreg_to_unrestricted(u32 dst
, u32 lreg
, u32 rreg
, struct nfp_insn_ur_regs
*reg
)
161 memset(reg
, 0, sizeof(*reg
));
163 /* Decode destination */
164 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_IMM
)
167 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_GPR_B
)
168 reg
->dst_ab
= ALU_DST_B
;
169 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_GPR_BOTH
)
171 reg
->dst
= nfp_swreg_to_unreg(dst
, true);
173 /* Decode source operands */
174 if (FIELD_GET(NN_REG_TYPE
, lreg
) == FIELD_GET(NN_REG_TYPE
, rreg
))
177 if (FIELD_GET(NN_REG_TYPE
, lreg
) == NN_REG_GPR_B
||
178 FIELD_GET(NN_REG_TYPE
, rreg
) == NN_REG_GPR_A
) {
179 reg
->areg
= nfp_swreg_to_unreg(rreg
, false);
180 reg
->breg
= nfp_swreg_to_unreg(lreg
, false);
183 reg
->areg
= nfp_swreg_to_unreg(lreg
, false);
184 reg
->breg
= nfp_swreg_to_unreg(rreg
, false);
190 static u16
nfp_swreg_to_rereg(u32 swreg
, bool is_dst
, bool has_imm8
, bool *i8
)
192 u16 val
= FIELD_GET(NN_REG_VAL
, swreg
);
194 switch (FIELD_GET(NN_REG_TYPE
, swreg
)) {
197 case NN_REG_GPR_BOTH
:
200 return RE_REG_XFR
| val
;
202 if (val
& ~(0x7f | has_imm8
<< 7)) {
203 pr_err("immediate too large\n");
207 return RE_REG_IMM_encode(val
& 0x7f);
209 return is_dst
? RE_REG_NO_DST
: REG_NONE
;
211 pr_err("unrecognized reg encoding\n");
217 swreg_to_restricted(u32 dst
, u32 lreg
, u32 rreg
, struct nfp_insn_re_regs
*reg
,
220 memset(reg
, 0, sizeof(*reg
));
222 /* Decode destination */
223 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_IMM
)
226 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_GPR_B
)
227 reg
->dst_ab
= ALU_DST_B
;
228 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_GPR_BOTH
)
230 reg
->dst
= nfp_swreg_to_rereg(dst
, true, false, NULL
);
232 /* Decode source operands */
233 if (FIELD_GET(NN_REG_TYPE
, lreg
) == FIELD_GET(NN_REG_TYPE
, rreg
))
236 if (FIELD_GET(NN_REG_TYPE
, lreg
) == NN_REG_GPR_B
||
237 FIELD_GET(NN_REG_TYPE
, rreg
) == NN_REG_GPR_A
) {
238 reg
->areg
= nfp_swreg_to_rereg(rreg
, false, has_imm8
, ®
->i8
);
239 reg
->breg
= nfp_swreg_to_rereg(lreg
, false, has_imm8
, ®
->i8
);
242 reg
->areg
= nfp_swreg_to_rereg(lreg
, false, has_imm8
, ®
->i8
);
243 reg
->breg
= nfp_swreg_to_rereg(rreg
, false, has_imm8
, ®
->i8
);
249 /* --- Emitters --- */
250 static const struct cmd_tgt_act cmd_tgt_act
[__CMD_TGT_MAP_SIZE
] = {
251 [CMD_TGT_WRITE8
] = { 0x00, 0x42 },
252 [CMD_TGT_READ8
] = { 0x01, 0x43 },
253 [CMD_TGT_READ_LE
] = { 0x01, 0x40 },
254 [CMD_TGT_READ_SWAP_LE
] = { 0x03, 0x40 },
258 __emit_cmd(struct nfp_prog
*nfp_prog
, enum cmd_tgt_map op
,
259 u8 mode
, u8 xfer
, u8 areg
, u8 breg
, u8 size
, bool sync
)
261 enum cmd_ctx_swap ctx
;
267 ctx
= CMD_CTX_NO_SWAP
;
269 insn
= FIELD_PREP(OP_CMD_A_SRC
, areg
) |
270 FIELD_PREP(OP_CMD_CTX
, ctx
) |
271 FIELD_PREP(OP_CMD_B_SRC
, breg
) |
272 FIELD_PREP(OP_CMD_TOKEN
, cmd_tgt_act
[op
].token
) |
273 FIELD_PREP(OP_CMD_XFER
, xfer
) |
274 FIELD_PREP(OP_CMD_CNT
, size
) |
275 FIELD_PREP(OP_CMD_SIG
, sync
) |
276 FIELD_PREP(OP_CMD_TGT_CMD
, cmd_tgt_act
[op
].tgt_cmd
) |
277 FIELD_PREP(OP_CMD_MODE
, mode
);
279 nfp_prog_push(nfp_prog
, insn
);
283 emit_cmd(struct nfp_prog
*nfp_prog
, enum cmd_tgt_map op
,
284 u8 mode
, u8 xfer
, u32 lreg
, u32 rreg
, u8 size
, bool sync
)
286 struct nfp_insn_re_regs reg
;
289 err
= swreg_to_restricted(reg_none(), lreg
, rreg
, ®
, false);
291 nfp_prog
->error
= err
;
295 pr_err("cmd can't swap arguments\n");
296 nfp_prog
->error
= -EFAULT
;
300 __emit_cmd(nfp_prog
, op
, mode
, xfer
, reg
.areg
, reg
.breg
, size
, sync
);
304 __emit_br(struct nfp_prog
*nfp_prog
, enum br_mask mask
, enum br_ev_pip ev_pip
,
305 enum br_ctx_signal_state css
, u16 addr
, u8 defer
)
307 u16 addr_lo
, addr_hi
;
310 addr_lo
= addr
& (OP_BR_ADDR_LO
>> __bf_shf(OP_BR_ADDR_LO
));
311 addr_hi
= addr
!= addr_lo
;
314 FIELD_PREP(OP_BR_MASK
, mask
) |
315 FIELD_PREP(OP_BR_EV_PIP
, ev_pip
) |
316 FIELD_PREP(OP_BR_CSS
, css
) |
317 FIELD_PREP(OP_BR_DEFBR
, defer
) |
318 FIELD_PREP(OP_BR_ADDR_LO
, addr_lo
) |
319 FIELD_PREP(OP_BR_ADDR_HI
, addr_hi
);
321 nfp_prog_push(nfp_prog
, insn
);
324 static void emit_br_def(struct nfp_prog
*nfp_prog
, u16 addr
, u8 defer
)
327 pr_err("BUG: branch defer out of bounds %d\n", defer
);
328 nfp_prog
->error
= -EFAULT
;
331 __emit_br(nfp_prog
, BR_UNC
, BR_EV_PIP_UNCOND
, BR_CSS_NONE
, addr
, defer
);
335 emit_br(struct nfp_prog
*nfp_prog
, enum br_mask mask
, u16 addr
, u8 defer
)
337 __emit_br(nfp_prog
, mask
,
338 mask
!= BR_UNC
? BR_EV_PIP_COND
: BR_EV_PIP_UNCOND
,
339 BR_CSS_NONE
, addr
, defer
);
343 __emit_br_byte(struct nfp_prog
*nfp_prog
, u8 areg
, u8 breg
, bool imm8
,
344 u8 byte
, bool equal
, u16 addr
, u8 defer
)
346 u16 addr_lo
, addr_hi
;
349 addr_lo
= addr
& (OP_BB_ADDR_LO
>> __bf_shf(OP_BB_ADDR_LO
));
350 addr_hi
= addr
!= addr_lo
;
352 insn
= OP_BBYTE_BASE
|
353 FIELD_PREP(OP_BB_A_SRC
, areg
) |
354 FIELD_PREP(OP_BB_BYTE
, byte
) |
355 FIELD_PREP(OP_BB_B_SRC
, breg
) |
356 FIELD_PREP(OP_BB_I8
, imm8
) |
357 FIELD_PREP(OP_BB_EQ
, equal
) |
358 FIELD_PREP(OP_BB_DEFBR
, defer
) |
359 FIELD_PREP(OP_BB_ADDR_LO
, addr_lo
) |
360 FIELD_PREP(OP_BB_ADDR_HI
, addr_hi
);
362 nfp_prog_push(nfp_prog
, insn
);
366 emit_br_byte_neq(struct nfp_prog
*nfp_prog
,
367 u32 dst
, u8 imm
, u8 byte
, u16 addr
, u8 defer
)
369 struct nfp_insn_re_regs reg
;
372 err
= swreg_to_restricted(reg_none(), dst
, reg_imm(imm
), ®
, true);
374 nfp_prog
->error
= err
;
378 __emit_br_byte(nfp_prog
, reg
.areg
, reg
.breg
, reg
.i8
, byte
, false, addr
,
383 __emit_immed(struct nfp_prog
*nfp_prog
, u16 areg
, u16 breg
, u16 imm_hi
,
384 enum immed_width width
, bool invert
,
385 enum immed_shift shift
, bool wr_both
)
389 insn
= OP_IMMED_BASE
|
390 FIELD_PREP(OP_IMMED_A_SRC
, areg
) |
391 FIELD_PREP(OP_IMMED_B_SRC
, breg
) |
392 FIELD_PREP(OP_IMMED_IMM
, imm_hi
) |
393 FIELD_PREP(OP_IMMED_WIDTH
, width
) |
394 FIELD_PREP(OP_IMMED_INV
, invert
) |
395 FIELD_PREP(OP_IMMED_SHIFT
, shift
) |
396 FIELD_PREP(OP_IMMED_WR_AB
, wr_both
);
398 nfp_prog_push(nfp_prog
, insn
);
402 emit_immed(struct nfp_prog
*nfp_prog
, u32 dst
, u16 imm
,
403 enum immed_width width
, bool invert
, enum immed_shift shift
)
405 struct nfp_insn_ur_regs reg
;
408 if (FIELD_GET(NN_REG_TYPE
, dst
) == NN_REG_IMM
) {
409 nfp_prog
->error
= -EFAULT
;
413 err
= swreg_to_unrestricted(dst
, dst
, reg_imm(imm
& 0xff), ®
);
415 nfp_prog
->error
= err
;
419 __emit_immed(nfp_prog
, reg
.areg
, reg
.breg
, imm
>> 8, width
,
420 invert
, shift
, reg
.wr_both
);
424 __emit_shf(struct nfp_prog
*nfp_prog
, u16 dst
, enum alu_dst_ab dst_ab
,
425 enum shf_sc sc
, u8 shift
,
426 u16 areg
, enum shf_op op
, u16 breg
, bool i8
, bool sw
, bool wr_both
)
430 if (!FIELD_FIT(OP_SHF_SHIFT
, shift
)) {
431 nfp_prog
->error
= -EFAULT
;
435 if (sc
== SHF_SC_L_SHF
)
439 FIELD_PREP(OP_SHF_A_SRC
, areg
) |
440 FIELD_PREP(OP_SHF_SC
, sc
) |
441 FIELD_PREP(OP_SHF_B_SRC
, breg
) |
442 FIELD_PREP(OP_SHF_I8
, i8
) |
443 FIELD_PREP(OP_SHF_SW
, sw
) |
444 FIELD_PREP(OP_SHF_DST
, dst
) |
445 FIELD_PREP(OP_SHF_SHIFT
, shift
) |
446 FIELD_PREP(OP_SHF_OP
, op
) |
447 FIELD_PREP(OP_SHF_DST_AB
, dst_ab
) |
448 FIELD_PREP(OP_SHF_WR_AB
, wr_both
);
450 nfp_prog_push(nfp_prog
, insn
);
454 emit_shf(struct nfp_prog
*nfp_prog
, u32 dst
, u32 lreg
, enum shf_op op
, u32 rreg
,
455 enum shf_sc sc
, u8 shift
)
457 struct nfp_insn_re_regs reg
;
460 err
= swreg_to_restricted(dst
, lreg
, rreg
, ®
, true);
462 nfp_prog
->error
= err
;
466 __emit_shf(nfp_prog
, reg
.dst
, reg
.dst_ab
, sc
, shift
,
467 reg
.areg
, op
, reg
.breg
, reg
.i8
, reg
.swap
, reg
.wr_both
);
471 __emit_alu(struct nfp_prog
*nfp_prog
, u16 dst
, enum alu_dst_ab dst_ab
,
472 u16 areg
, enum alu_op op
, u16 breg
, bool swap
, bool wr_both
)
477 FIELD_PREP(OP_ALU_A_SRC
, areg
) |
478 FIELD_PREP(OP_ALU_B_SRC
, breg
) |
479 FIELD_PREP(OP_ALU_DST
, dst
) |
480 FIELD_PREP(OP_ALU_SW
, swap
) |
481 FIELD_PREP(OP_ALU_OP
, op
) |
482 FIELD_PREP(OP_ALU_DST_AB
, dst_ab
) |
483 FIELD_PREP(OP_ALU_WR_AB
, wr_both
);
485 nfp_prog_push(nfp_prog
, insn
);
489 emit_alu(struct nfp_prog
*nfp_prog
, u32 dst
, u32 lreg
, enum alu_op op
, u32 rreg
)
491 struct nfp_insn_ur_regs reg
;
494 err
= swreg_to_unrestricted(dst
, lreg
, rreg
, ®
);
496 nfp_prog
->error
= err
;
500 __emit_alu(nfp_prog
, reg
.dst
, reg
.dst_ab
,
501 reg
.areg
, op
, reg
.breg
, reg
.swap
, reg
.wr_both
);
505 __emit_ld_field(struct nfp_prog
*nfp_prog
, enum shf_sc sc
,
506 u8 areg
, u8 bmask
, u8 breg
, u8 shift
, bool imm8
,
507 bool zero
, bool swap
, bool wr_both
)
512 FIELD_PREP(OP_LDF_A_SRC
, areg
) |
513 FIELD_PREP(OP_LDF_SC
, sc
) |
514 FIELD_PREP(OP_LDF_B_SRC
, breg
) |
515 FIELD_PREP(OP_LDF_I8
, imm8
) |
516 FIELD_PREP(OP_LDF_SW
, swap
) |
517 FIELD_PREP(OP_LDF_ZF
, zero
) |
518 FIELD_PREP(OP_LDF_BMASK
, bmask
) |
519 FIELD_PREP(OP_LDF_SHF
, shift
) |
520 FIELD_PREP(OP_LDF_WR_AB
, wr_both
);
522 nfp_prog_push(nfp_prog
, insn
);
526 emit_ld_field_any(struct nfp_prog
*nfp_prog
, enum shf_sc sc
, u8 shift
,
527 u32 dst
, u8 bmask
, u32 src
, bool zero
)
529 struct nfp_insn_re_regs reg
;
532 err
= swreg_to_restricted(reg_none(), dst
, src
, ®
, true);
534 nfp_prog
->error
= err
;
538 __emit_ld_field(nfp_prog
, sc
, reg
.areg
, bmask
, reg
.breg
, shift
,
539 reg
.i8
, zero
, reg
.swap
, reg
.wr_both
);
543 emit_ld_field(struct nfp_prog
*nfp_prog
, u32 dst
, u8 bmask
, u32 src
,
544 enum shf_sc sc
, u8 shift
)
546 emit_ld_field_any(nfp_prog
, sc
, shift
, dst
, bmask
, src
, false);
549 /* --- Wrappers --- */
550 static bool pack_immed(u32 imm
, u16
*val
, enum immed_shift
*shift
)
552 if (!(imm
& 0xffff0000)) {
554 *shift
= IMMED_SHIFT_0B
;
555 } else if (!(imm
& 0xff0000ff)) {
557 *shift
= IMMED_SHIFT_1B
;
558 } else if (!(imm
& 0x0000ffff)) {
560 *shift
= IMMED_SHIFT_2B
;
568 static void wrp_immed(struct nfp_prog
*nfp_prog
, u32 dst
, u32 imm
)
570 enum immed_shift shift
;
573 if (pack_immed(imm
, &val
, &shift
)) {
574 emit_immed(nfp_prog
, dst
, val
, IMMED_WIDTH_ALL
, false, shift
);
575 } else if (pack_immed(~imm
, &val
, &shift
)) {
576 emit_immed(nfp_prog
, dst
, val
, IMMED_WIDTH_ALL
, true, shift
);
578 emit_immed(nfp_prog
, dst
, imm
& 0xffff, IMMED_WIDTH_ALL
,
579 false, IMMED_SHIFT_0B
);
580 emit_immed(nfp_prog
, dst
, imm
>> 16, IMMED_WIDTH_WORD
,
581 false, IMMED_SHIFT_2B
);
585 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
586 * If the @imm is small enough encode it directly in operand and return
587 * otherwise load @imm to a spare register and return its encoding.
589 static u32
ur_load_imm_any(struct nfp_prog
*nfp_prog
, u32 imm
, u32 tmp_reg
)
591 if (FIELD_FIT(UR_REG_IMM_MAX
, imm
))
594 wrp_immed(nfp_prog
, tmp_reg
, imm
);
598 /* re_load_imm_any() - encode immediate or use tmp register (restricted)
599 * If the @imm is small enough encode it directly in operand and return
600 * otherwise load @imm to a spare register and return its encoding.
602 static u32
re_load_imm_any(struct nfp_prog
*nfp_prog
, u32 imm
, u32 tmp_reg
)
604 if (FIELD_FIT(RE_REG_IMM_MAX
, imm
))
607 wrp_immed(nfp_prog
, tmp_reg
, imm
);
612 wrp_br_special(struct nfp_prog
*nfp_prog
, enum br_mask mask
,
613 enum br_special special
)
615 emit_br(nfp_prog
, mask
, 0, 0);
617 nfp_prog
->prog
[nfp_prog
->prog_len
- 1] |=
618 FIELD_PREP(OP_BR_SPECIAL
, special
);
621 static void wrp_reg_mov(struct nfp_prog
*nfp_prog
, u16 dst
, u16 src
)
623 emit_alu(nfp_prog
, reg_both(dst
), reg_none(), ALU_OP_NONE
, reg_b(src
));
627 construct_data_ind_ld(struct nfp_prog
*nfp_prog
, u16 offset
,
628 u16 src
, bool src_valid
, u8 size
)
634 /* We load the value from the address indicated in @offset and then
635 * shift out the data we don't need. Note: this is big endian!
637 sz
= size
< 4 ? 4 : size
;
638 shift
= size
< 4 ? 4 - size
: 0;
641 /* Calculate the true offset (src_reg + imm) */
642 tmp_reg
= ur_load_imm_any(nfp_prog
, offset
, imm_b(nfp_prog
));
643 emit_alu(nfp_prog
, imm_both(nfp_prog
),
644 reg_a(src
), ALU_OP_ADD
, tmp_reg
);
645 /* Check packet length (size guaranteed to fit b/c it's u8) */
646 emit_alu(nfp_prog
, imm_a(nfp_prog
),
647 imm_a(nfp_prog
), ALU_OP_ADD
, reg_imm(size
));
648 emit_alu(nfp_prog
, reg_none(),
649 NFP_BPF_ABI_LEN
, ALU_OP_SUB
, imm_a(nfp_prog
));
650 wrp_br_special(nfp_prog
, BR_BLO
, OP_BR_GO_ABORT
);
652 emit_cmd(nfp_prog
, CMD_TGT_READ8
, CMD_MODE_32b
, 0,
653 pkt_reg(nfp_prog
), imm_b(nfp_prog
), sz
- 1, true);
655 /* Check packet length */
656 tmp_reg
= ur_load_imm_any(nfp_prog
, offset
+ size
,
658 emit_alu(nfp_prog
, reg_none(),
659 NFP_BPF_ABI_LEN
, ALU_OP_SUB
, tmp_reg
);
660 wrp_br_special(nfp_prog
, BR_BLO
, OP_BR_GO_ABORT
);
662 tmp_reg
= re_load_imm_any(nfp_prog
, offset
, imm_b(nfp_prog
));
663 emit_cmd(nfp_prog
, CMD_TGT_READ8
, CMD_MODE_32b
, 0,
664 pkt_reg(nfp_prog
), tmp_reg
, sz
- 1, true);
669 emit_shf(nfp_prog
, reg_both(0), reg_none(), SHF_OP_NONE
,
670 reg_xfer(0), SHF_SC_R_SHF
, shift
* 8);
672 for (; i
* 4 < size
; i
++)
673 emit_alu(nfp_prog
, reg_both(i
),
674 reg_none(), ALU_OP_NONE
, reg_xfer(i
));
677 wrp_immed(nfp_prog
, reg_both(1), 0);
682 static int construct_data_ld(struct nfp_prog
*nfp_prog
, u16 offset
, u8 size
)
684 return construct_data_ind_ld(nfp_prog
, offset
, 0, false, size
);
687 static int wrp_set_mark(struct nfp_prog
*nfp_prog
, u8 src
)
689 emit_alu(nfp_prog
, NFP_BPF_ABI_MARK
,
690 reg_none(), ALU_OP_NONE
, reg_b(src
));
691 emit_alu(nfp_prog
, NFP_BPF_ABI_FLAGS
,
692 NFP_BPF_ABI_FLAGS
, ALU_OP_OR
, reg_imm(NFP_BPF_ABI_FLAG_MARK
));
698 wrp_alu_imm(struct nfp_prog
*nfp_prog
, u8 dst
, enum alu_op alu_op
, u32 imm
)
702 if (alu_op
== ALU_OP_AND
) {
704 wrp_immed(nfp_prog
, reg_both(dst
), 0);
708 if (alu_op
== ALU_OP_OR
) {
710 wrp_immed(nfp_prog
, reg_both(dst
), ~0U);
714 if (alu_op
== ALU_OP_XOR
) {
716 emit_alu(nfp_prog
, reg_both(dst
), reg_none(),
717 ALU_OP_NEG
, reg_b(dst
));
722 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
, imm_b(nfp_prog
));
723 emit_alu(nfp_prog
, reg_both(dst
), reg_a(dst
), alu_op
, tmp_reg
);
727 wrp_alu64_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
728 enum alu_op alu_op
, bool skip
)
730 const struct bpf_insn
*insn
= &meta
->insn
;
731 u64 imm
= insn
->imm
; /* sign extend */
738 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2, alu_op
, imm
& ~0U);
739 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2 + 1, alu_op
, imm
>> 32);
745 wrp_alu64_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
748 u8 dst
= meta
->insn
.dst_reg
* 2, src
= meta
->insn
.src_reg
* 2;
750 emit_alu(nfp_prog
, reg_both(dst
), reg_a(dst
), alu_op
, reg_b(src
));
751 emit_alu(nfp_prog
, reg_both(dst
+ 1),
752 reg_a(dst
+ 1), alu_op
, reg_b(src
+ 1));
758 wrp_alu32_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
759 enum alu_op alu_op
, bool skip
)
761 const struct bpf_insn
*insn
= &meta
->insn
;
768 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2, alu_op
, insn
->imm
);
769 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1), 0);
775 wrp_alu32_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
778 u8 dst
= meta
->insn
.dst_reg
* 2, src
= meta
->insn
.src_reg
* 2;
780 emit_alu(nfp_prog
, reg_both(dst
), reg_a(dst
), alu_op
, reg_b(src
));
781 wrp_immed(nfp_prog
, reg_both(meta
->insn
.dst_reg
* 2 + 1), 0);
787 wrp_test_reg_one(struct nfp_prog
*nfp_prog
, u8 dst
, enum alu_op alu_op
, u8 src
,
788 enum br_mask br_mask
, u16 off
)
790 emit_alu(nfp_prog
, reg_none(), reg_a(dst
), alu_op
, reg_b(src
));
791 emit_br(nfp_prog
, br_mask
, off
, 0);
795 wrp_test_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
796 enum alu_op alu_op
, enum br_mask br_mask
)
798 const struct bpf_insn
*insn
= &meta
->insn
;
800 if (insn
->off
< 0) /* TODO */
803 wrp_test_reg_one(nfp_prog
, insn
->dst_reg
* 2, alu_op
,
804 insn
->src_reg
* 2, br_mask
, insn
->off
);
805 wrp_test_reg_one(nfp_prog
, insn
->dst_reg
* 2 + 1, alu_op
,
806 insn
->src_reg
* 2 + 1, br_mask
, insn
->off
);
812 wrp_cmp_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
813 enum br_mask br_mask
, bool swap
)
815 const struct bpf_insn
*insn
= &meta
->insn
;
816 u64 imm
= insn
->imm
; /* sign extend */
817 u8 reg
= insn
->dst_reg
* 2;
820 if (insn
->off
< 0) /* TODO */
823 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
& ~0U, imm_b(nfp_prog
));
825 emit_alu(nfp_prog
, reg_none(), reg_a(reg
), ALU_OP_SUB
, tmp_reg
);
827 emit_alu(nfp_prog
, reg_none(), tmp_reg
, ALU_OP_SUB
, reg_a(reg
));
829 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
>> 32, imm_b(nfp_prog
));
831 emit_alu(nfp_prog
, reg_none(),
832 reg_a(reg
+ 1), ALU_OP_SUB_C
, tmp_reg
);
834 emit_alu(nfp_prog
, reg_none(),
835 tmp_reg
, ALU_OP_SUB_C
, reg_a(reg
+ 1));
837 emit_br(nfp_prog
, br_mask
, insn
->off
, 0);
843 wrp_cmp_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
,
844 enum br_mask br_mask
, bool swap
)
846 const struct bpf_insn
*insn
= &meta
->insn
;
847 u8 areg
= insn
->src_reg
* 2, breg
= insn
->dst_reg
* 2;
849 if (insn
->off
< 0) /* TODO */
858 emit_alu(nfp_prog
, reg_none(), reg_a(areg
), ALU_OP_SUB
, reg_b(breg
));
859 emit_alu(nfp_prog
, reg_none(),
860 reg_a(areg
+ 1), ALU_OP_SUB_C
, reg_b(breg
+ 1));
861 emit_br(nfp_prog
, br_mask
, insn
->off
, 0);
866 /* --- Callbacks --- */
867 static int mov_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
869 const struct bpf_insn
*insn
= &meta
->insn
;
871 wrp_reg_mov(nfp_prog
, insn
->dst_reg
* 2, insn
->src_reg
* 2);
872 wrp_reg_mov(nfp_prog
, insn
->dst_reg
* 2 + 1, insn
->src_reg
* 2 + 1);
877 static int mov_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
879 u64 imm
= meta
->insn
.imm
; /* sign extend */
881 wrp_immed(nfp_prog
, reg_both(meta
->insn
.dst_reg
* 2), imm
& ~0U);
882 wrp_immed(nfp_prog
, reg_both(meta
->insn
.dst_reg
* 2 + 1), imm
>> 32);
887 static int xor_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
889 return wrp_alu64_reg(nfp_prog
, meta
, ALU_OP_XOR
);
892 static int xor_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
894 return wrp_alu64_imm(nfp_prog
, meta
, ALU_OP_XOR
, !meta
->insn
.imm
);
897 static int and_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
899 return wrp_alu64_reg(nfp_prog
, meta
, ALU_OP_AND
);
902 static int and_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
904 return wrp_alu64_imm(nfp_prog
, meta
, ALU_OP_AND
, !~meta
->insn
.imm
);
907 static int or_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
909 return wrp_alu64_reg(nfp_prog
, meta
, ALU_OP_OR
);
912 static int or_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
914 return wrp_alu64_imm(nfp_prog
, meta
, ALU_OP_OR
, !meta
->insn
.imm
);
917 static int add_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
919 const struct bpf_insn
*insn
= &meta
->insn
;
921 emit_alu(nfp_prog
, reg_both(insn
->dst_reg
* 2),
922 reg_a(insn
->dst_reg
* 2), ALU_OP_ADD
,
923 reg_b(insn
->src_reg
* 2));
924 emit_alu(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1),
925 reg_a(insn
->dst_reg
* 2 + 1), ALU_OP_ADD_C
,
926 reg_b(insn
->src_reg
* 2 + 1));
931 static int add_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
933 const struct bpf_insn
*insn
= &meta
->insn
;
934 u64 imm
= insn
->imm
; /* sign extend */
936 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2, ALU_OP_ADD
, imm
& ~0U);
937 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2 + 1, ALU_OP_ADD_C
, imm
>> 32);
942 static int sub_reg64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
944 const struct bpf_insn
*insn
= &meta
->insn
;
946 emit_alu(nfp_prog
, reg_both(insn
->dst_reg
* 2),
947 reg_a(insn
->dst_reg
* 2), ALU_OP_SUB
,
948 reg_b(insn
->src_reg
* 2));
949 emit_alu(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1),
950 reg_a(insn
->dst_reg
* 2 + 1), ALU_OP_SUB_C
,
951 reg_b(insn
->src_reg
* 2 + 1));
956 static int sub_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
958 const struct bpf_insn
*insn
= &meta
->insn
;
959 u64 imm
= insn
->imm
; /* sign extend */
961 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2, ALU_OP_SUB
, imm
& ~0U);
962 wrp_alu_imm(nfp_prog
, insn
->dst_reg
* 2 + 1, ALU_OP_SUB_C
, imm
>> 32);
967 static int shl_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
969 const struct bpf_insn
*insn
= &meta
->insn
;
974 wrp_reg_mov(nfp_prog
, insn
->dst_reg
* 2 + 1, insn
->dst_reg
* 2);
975 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2), 0);
980 static int shr_imm64(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
982 const struct bpf_insn
*insn
= &meta
->insn
;
987 wrp_reg_mov(nfp_prog
, insn
->dst_reg
* 2, insn
->dst_reg
* 2 + 1);
988 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1), 0);
993 static int mov_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
995 const struct bpf_insn
*insn
= &meta
->insn
;
997 wrp_reg_mov(nfp_prog
, insn
->dst_reg
* 2, insn
->src_reg
* 2);
998 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1), 0);
1003 static int mov_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1005 const struct bpf_insn
*insn
= &meta
->insn
;
1007 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2), insn
->imm
);
1008 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1), 0);
1013 static int xor_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1015 return wrp_alu32_reg(nfp_prog
, meta
, ALU_OP_XOR
);
1018 static int xor_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1020 return wrp_alu32_imm(nfp_prog
, meta
, ALU_OP_XOR
, !~meta
->insn
.imm
);
1023 static int and_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1025 return wrp_alu32_reg(nfp_prog
, meta
, ALU_OP_AND
);
1028 static int and_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1030 return wrp_alu32_imm(nfp_prog
, meta
, ALU_OP_AND
, !~meta
->insn
.imm
);
1033 static int or_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1035 return wrp_alu32_reg(nfp_prog
, meta
, ALU_OP_OR
);
1038 static int or_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1040 return wrp_alu32_imm(nfp_prog
, meta
, ALU_OP_OR
, !meta
->insn
.imm
);
1043 static int add_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1045 return wrp_alu32_reg(nfp_prog
, meta
, ALU_OP_ADD
);
1048 static int add_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1050 return wrp_alu32_imm(nfp_prog
, meta
, ALU_OP_ADD
, !meta
->insn
.imm
);
1053 static int sub_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1055 return wrp_alu32_reg(nfp_prog
, meta
, ALU_OP_SUB
);
1058 static int sub_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1060 return wrp_alu32_imm(nfp_prog
, meta
, ALU_OP_SUB
, !meta
->insn
.imm
);
1063 static int shl_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1065 const struct bpf_insn
*insn
= &meta
->insn
;
1068 return 1; /* TODO: zero shift means indirect */
1070 emit_shf(nfp_prog
, reg_both(insn
->dst_reg
* 2),
1071 reg_none(), SHF_OP_NONE
, reg_b(insn
->dst_reg
* 2),
1072 SHF_SC_L_SHF
, insn
->imm
);
1073 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2 + 1), 0);
1078 static int imm_ld8_part2(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1080 wrp_immed(nfp_prog
, reg_both(nfp_meta_prev(meta
)->insn
.dst_reg
* 2 + 1),
1086 static int imm_ld8(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1088 const struct bpf_insn
*insn
= &meta
->insn
;
1090 meta
->double_cb
= imm_ld8_part2
;
1091 wrp_immed(nfp_prog
, reg_both(insn
->dst_reg
* 2), insn
->imm
);
1096 static int data_ld1(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1098 return construct_data_ld(nfp_prog
, meta
->insn
.imm
, 1);
1101 static int data_ld2(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1103 return construct_data_ld(nfp_prog
, meta
->insn
.imm
, 2);
1106 static int data_ld4(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1108 return construct_data_ld(nfp_prog
, meta
->insn
.imm
, 4);
1111 static int data_ind_ld1(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1113 return construct_data_ind_ld(nfp_prog
, meta
->insn
.imm
,
1114 meta
->insn
.src_reg
* 2, true, 1);
1117 static int data_ind_ld2(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1119 return construct_data_ind_ld(nfp_prog
, meta
->insn
.imm
,
1120 meta
->insn
.src_reg
* 2, true, 2);
1123 static int data_ind_ld4(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1125 return construct_data_ind_ld(nfp_prog
, meta
->insn
.imm
,
1126 meta
->insn
.src_reg
* 2, true, 4);
1129 static int mem_ldx4_skb(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1131 if (meta
->insn
.off
== offsetof(struct sk_buff
, len
))
1132 emit_alu(nfp_prog
, reg_both(meta
->insn
.dst_reg
* 2),
1133 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_LEN
);
1140 static int mem_ldx4_xdp(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1142 u32 dst
= reg_both(meta
->insn
.dst_reg
* 2);
1144 if (meta
->insn
.off
!= offsetof(struct xdp_md
, data
) &&
1145 meta
->insn
.off
!= offsetof(struct xdp_md
, data_end
))
1148 emit_alu(nfp_prog
, dst
, reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_PKT
);
1150 if (meta
->insn
.off
== offsetof(struct xdp_md
, data
))
1153 emit_alu(nfp_prog
, dst
, dst
, ALU_OP_ADD
, NFP_BPF_ABI_LEN
);
1158 static int mem_ldx4(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1162 if (nfp_prog
->act
== NN_ACT_XDP
)
1163 ret
= mem_ldx4_xdp(nfp_prog
, meta
);
1165 ret
= mem_ldx4_skb(nfp_prog
, meta
);
1167 wrp_immed(nfp_prog
, reg_both(meta
->insn
.dst_reg
* 2 + 1), 0);
1172 static int mem_stx4_skb(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1174 if (meta
->insn
.off
== offsetof(struct sk_buff
, mark
))
1175 return wrp_set_mark(nfp_prog
, meta
->insn
.src_reg
* 2);
1180 static int mem_stx4_xdp(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1185 static int mem_stx4(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1187 if (nfp_prog
->act
== NN_ACT_XDP
)
1188 return mem_stx4_xdp(nfp_prog
, meta
);
1189 return mem_stx4_skb(nfp_prog
, meta
);
1192 static int jump(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1194 if (meta
->insn
.off
< 0) /* TODO */
1196 emit_br(nfp_prog
, BR_UNC
, meta
->insn
.off
, 0);
1201 static int jeq_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1203 const struct bpf_insn
*insn
= &meta
->insn
;
1204 u64 imm
= insn
->imm
; /* sign extend */
1205 u32 or1
= reg_a(insn
->dst_reg
* 2), or2
= reg_b(insn
->dst_reg
* 2 + 1);
1208 if (insn
->off
< 0) /* TODO */
1212 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
& ~0U, imm_b(nfp_prog
));
1213 emit_alu(nfp_prog
, imm_a(nfp_prog
),
1214 reg_a(insn
->dst_reg
* 2), ALU_OP_XOR
, tmp_reg
);
1215 or1
= imm_a(nfp_prog
);
1219 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
>> 32, imm_b(nfp_prog
));
1220 emit_alu(nfp_prog
, imm_b(nfp_prog
),
1221 reg_a(insn
->dst_reg
* 2 + 1), ALU_OP_XOR
, tmp_reg
);
1222 or2
= imm_b(nfp_prog
);
1225 emit_alu(nfp_prog
, reg_none(), or1
, ALU_OP_OR
, or2
);
1226 emit_br(nfp_prog
, BR_BEQ
, insn
->off
, 0);
1231 static int jgt_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1233 return wrp_cmp_imm(nfp_prog
, meta
, BR_BLO
, false);
1236 static int jge_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1238 return wrp_cmp_imm(nfp_prog
, meta
, BR_BHS
, true);
1241 static int jset_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1243 const struct bpf_insn
*insn
= &meta
->insn
;
1244 u64 imm
= insn
->imm
; /* sign extend */
1247 if (insn
->off
< 0) /* TODO */
1256 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
& ~0U, imm_b(nfp_prog
));
1257 emit_alu(nfp_prog
, reg_none(),
1258 reg_a(insn
->dst_reg
* 2), ALU_OP_AND
, tmp_reg
);
1259 emit_br(nfp_prog
, BR_BNE
, insn
->off
, 0);
1263 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
>> 32, imm_b(nfp_prog
));
1264 emit_alu(nfp_prog
, reg_none(),
1265 reg_a(insn
->dst_reg
* 2 + 1), ALU_OP_AND
, tmp_reg
);
1266 emit_br(nfp_prog
, BR_BNE
, insn
->off
, 0);
1272 static int jne_imm(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1274 const struct bpf_insn
*insn
= &meta
->insn
;
1275 u64 imm
= insn
->imm
; /* sign extend */
1278 if (insn
->off
< 0) /* TODO */
1282 emit_alu(nfp_prog
, reg_none(), reg_a(insn
->dst_reg
* 2),
1283 ALU_OP_OR
, reg_b(insn
->dst_reg
* 2 + 1));
1284 emit_br(nfp_prog
, BR_BNE
, insn
->off
, 0);
1287 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
& ~0U, imm_b(nfp_prog
));
1288 emit_alu(nfp_prog
, reg_none(),
1289 reg_a(insn
->dst_reg
* 2), ALU_OP_XOR
, tmp_reg
);
1290 emit_br(nfp_prog
, BR_BNE
, insn
->off
, 0);
1292 tmp_reg
= ur_load_imm_any(nfp_prog
, imm
>> 32, imm_b(nfp_prog
));
1293 emit_alu(nfp_prog
, reg_none(),
1294 reg_a(insn
->dst_reg
* 2 + 1), ALU_OP_XOR
, tmp_reg
);
1295 emit_br(nfp_prog
, BR_BNE
, insn
->off
, 0);
1300 static int jeq_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1302 const struct bpf_insn
*insn
= &meta
->insn
;
1304 if (insn
->off
< 0) /* TODO */
1307 emit_alu(nfp_prog
, imm_a(nfp_prog
), reg_a(insn
->dst_reg
* 2),
1308 ALU_OP_XOR
, reg_b(insn
->src_reg
* 2));
1309 emit_alu(nfp_prog
, imm_b(nfp_prog
), reg_a(insn
->dst_reg
* 2 + 1),
1310 ALU_OP_XOR
, reg_b(insn
->src_reg
* 2 + 1));
1311 emit_alu(nfp_prog
, reg_none(),
1312 imm_a(nfp_prog
), ALU_OP_OR
, imm_b(nfp_prog
));
1313 emit_br(nfp_prog
, BR_BEQ
, insn
->off
, 0);
1318 static int jgt_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1320 return wrp_cmp_reg(nfp_prog
, meta
, BR_BLO
, false);
1323 static int jge_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1325 return wrp_cmp_reg(nfp_prog
, meta
, BR_BHS
, true);
1328 static int jset_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1330 return wrp_test_reg(nfp_prog
, meta
, ALU_OP_AND
, BR_BNE
);
1333 static int jne_reg(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1335 return wrp_test_reg(nfp_prog
, meta
, ALU_OP_XOR
, BR_BNE
);
1338 static int goto_out(struct nfp_prog
*nfp_prog
, struct nfp_insn_meta
*meta
)
1340 wrp_br_special(nfp_prog
, BR_UNC
, OP_BR_GO_OUT
);
1345 static const instr_cb_t instr_cb
[256] = {
1346 [BPF_ALU64
| BPF_MOV
| BPF_X
] = mov_reg64
,
1347 [BPF_ALU64
| BPF_MOV
| BPF_K
] = mov_imm64
,
1348 [BPF_ALU64
| BPF_XOR
| BPF_X
] = xor_reg64
,
1349 [BPF_ALU64
| BPF_XOR
| BPF_K
] = xor_imm64
,
1350 [BPF_ALU64
| BPF_AND
| BPF_X
] = and_reg64
,
1351 [BPF_ALU64
| BPF_AND
| BPF_K
] = and_imm64
,
1352 [BPF_ALU64
| BPF_OR
| BPF_X
] = or_reg64
,
1353 [BPF_ALU64
| BPF_OR
| BPF_K
] = or_imm64
,
1354 [BPF_ALU64
| BPF_ADD
| BPF_X
] = add_reg64
,
1355 [BPF_ALU64
| BPF_ADD
| BPF_K
] = add_imm64
,
1356 [BPF_ALU64
| BPF_SUB
| BPF_X
] = sub_reg64
,
1357 [BPF_ALU64
| BPF_SUB
| BPF_K
] = sub_imm64
,
1358 [BPF_ALU64
| BPF_LSH
| BPF_K
] = shl_imm64
,
1359 [BPF_ALU64
| BPF_RSH
| BPF_K
] = shr_imm64
,
1360 [BPF_ALU
| BPF_MOV
| BPF_X
] = mov_reg
,
1361 [BPF_ALU
| BPF_MOV
| BPF_K
] = mov_imm
,
1362 [BPF_ALU
| BPF_XOR
| BPF_X
] = xor_reg
,
1363 [BPF_ALU
| BPF_XOR
| BPF_K
] = xor_imm
,
1364 [BPF_ALU
| BPF_AND
| BPF_X
] = and_reg
,
1365 [BPF_ALU
| BPF_AND
| BPF_K
] = and_imm
,
1366 [BPF_ALU
| BPF_OR
| BPF_X
] = or_reg
,
1367 [BPF_ALU
| BPF_OR
| BPF_K
] = or_imm
,
1368 [BPF_ALU
| BPF_ADD
| BPF_X
] = add_reg
,
1369 [BPF_ALU
| BPF_ADD
| BPF_K
] = add_imm
,
1370 [BPF_ALU
| BPF_SUB
| BPF_X
] = sub_reg
,
1371 [BPF_ALU
| BPF_SUB
| BPF_K
] = sub_imm
,
1372 [BPF_ALU
| BPF_LSH
| BPF_K
] = shl_imm
,
1373 [BPF_LD
| BPF_IMM
| BPF_DW
] = imm_ld8
,
1374 [BPF_LD
| BPF_ABS
| BPF_B
] = data_ld1
,
1375 [BPF_LD
| BPF_ABS
| BPF_H
] = data_ld2
,
1376 [BPF_LD
| BPF_ABS
| BPF_W
] = data_ld4
,
1377 [BPF_LD
| BPF_IND
| BPF_B
] = data_ind_ld1
,
1378 [BPF_LD
| BPF_IND
| BPF_H
] = data_ind_ld2
,
1379 [BPF_LD
| BPF_IND
| BPF_W
] = data_ind_ld4
,
1380 [BPF_LDX
| BPF_MEM
| BPF_W
] = mem_ldx4
,
1381 [BPF_STX
| BPF_MEM
| BPF_W
] = mem_stx4
,
1382 [BPF_JMP
| BPF_JA
| BPF_K
] = jump
,
1383 [BPF_JMP
| BPF_JEQ
| BPF_K
] = jeq_imm
,
1384 [BPF_JMP
| BPF_JGT
| BPF_K
] = jgt_imm
,
1385 [BPF_JMP
| BPF_JGE
| BPF_K
] = jge_imm
,
1386 [BPF_JMP
| BPF_JSET
| BPF_K
] = jset_imm
,
1387 [BPF_JMP
| BPF_JNE
| BPF_K
] = jne_imm
,
1388 [BPF_JMP
| BPF_JEQ
| BPF_X
] = jeq_reg
,
1389 [BPF_JMP
| BPF_JGT
| BPF_X
] = jgt_reg
,
1390 [BPF_JMP
| BPF_JGE
| BPF_X
] = jge_reg
,
1391 [BPF_JMP
| BPF_JSET
| BPF_X
] = jset_reg
,
1392 [BPF_JMP
| BPF_JNE
| BPF_X
] = jne_reg
,
1393 [BPF_JMP
| BPF_EXIT
] = goto_out
,
1396 /* --- Misc code --- */
1397 static void br_set_offset(u64
*instr
, u16 offset
)
1399 u16 addr_lo
, addr_hi
;
1401 addr_lo
= offset
& (OP_BR_ADDR_LO
>> __bf_shf(OP_BR_ADDR_LO
));
1402 addr_hi
= offset
!= addr_lo
;
1403 *instr
&= ~(OP_BR_ADDR_HI
| OP_BR_ADDR_LO
);
1404 *instr
|= FIELD_PREP(OP_BR_ADDR_HI
, addr_hi
);
1405 *instr
|= FIELD_PREP(OP_BR_ADDR_LO
, addr_lo
);
1408 /* --- Assembler logic --- */
1409 static int nfp_fixup_branches(struct nfp_prog
*nfp_prog
)
1411 struct nfp_insn_meta
*meta
, *next
;
1415 nfp_for_each_insn_walk2(nfp_prog
, meta
, next
) {
1418 if (BPF_CLASS(meta
->insn
.code
) != BPF_JMP
)
1421 br_idx
= nfp_prog_offset_to_index(nfp_prog
, next
->off
) - 1;
1422 if (!nfp_is_br(nfp_prog
->prog
[br_idx
])) {
1423 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
1424 br_idx
, meta
->insn
.code
, nfp_prog
->prog
[br_idx
]);
1427 /* Leave special branches for later */
1428 if (FIELD_GET(OP_BR_SPECIAL
, nfp_prog
->prog
[br_idx
]))
1431 /* Find the target offset in assembler realm */
1432 off
= meta
->insn
.off
;
1434 pr_err("Fixup found zero offset!!\n");
1438 while (off
&& nfp_meta_has_next(nfp_prog
, next
)) {
1439 next
= nfp_meta_next(next
);
1443 pr_err("Fixup found too large jump!! %d\n", off
);
1448 pr_err("Branch landing on removed instruction!!\n");
1452 for (idx
= nfp_prog_offset_to_index(nfp_prog
, meta
->off
);
1453 idx
<= br_idx
; idx
++) {
1454 if (!nfp_is_br(nfp_prog
->prog
[idx
]))
1456 br_set_offset(&nfp_prog
->prog
[idx
], next
->off
);
1460 /* Fixup 'goto out's separately, they can be scattered around */
1461 for (br_idx
= 0; br_idx
< nfp_prog
->prog_len
; br_idx
++) {
1462 enum br_special special
;
1464 if ((nfp_prog
->prog
[br_idx
] & OP_BR_BASE_MASK
) != OP_BR_BASE
)
1467 special
= FIELD_GET(OP_BR_SPECIAL
, nfp_prog
->prog
[br_idx
]);
1472 br_set_offset(&nfp_prog
->prog
[br_idx
],
1475 case OP_BR_GO_ABORT
:
1476 br_set_offset(&nfp_prog
->prog
[br_idx
],
1477 nfp_prog
->tgt_abort
);
1481 nfp_prog
->prog
[br_idx
] &= ~OP_BR_SPECIAL
;
1487 static void nfp_intro(struct nfp_prog
*nfp_prog
)
1489 emit_alu(nfp_prog
, pkt_reg(nfp_prog
),
1490 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_PKT
);
1493 static void nfp_outro_tc_legacy(struct nfp_prog
*nfp_prog
)
1495 const u8 act2code
[] = {
1496 [NN_ACT_TC_DROP
] = 0x22,
1497 [NN_ACT_TC_REDIR
] = 0x24
1499 /* Target for aborts */
1500 nfp_prog
->tgt_abort
= nfp_prog_current_offset(nfp_prog
);
1501 wrp_immed(nfp_prog
, reg_both(0), 0);
1503 /* Target for normal exits */
1504 nfp_prog
->tgt_out
= nfp_prog_current_offset(nfp_prog
);
1506 * 0 0x11 -> pass, count as stat0
1507 * -1 drop 0x22 -> drop, count as stat1
1508 * redir 0x24 -> redir, count as stat1
1509 * ife mark 0x21 -> pass, count as stat1
1510 * ife + tx 0x24 -> redir, count as stat1
1512 emit_br_byte_neq(nfp_prog
, reg_b(0), 0xff, 0, nfp_prog
->tgt_done
, 2);
1513 emit_alu(nfp_prog
, reg_a(0),
1514 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_FLAGS
);
1515 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF
, 16);
1517 emit_br(nfp_prog
, BR_UNC
, nfp_prog
->tgt_done
, 1);
1518 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_imm(act2code
[nfp_prog
->act
]),
1522 static void nfp_outro_tc_da(struct nfp_prog
*nfp_prog
)
1524 /* TC direct-action mode:
1525 * 0,1 ok NOT SUPPORTED[1]
1526 * 2 drop 0x22 -> drop, count as stat1
1527 * 4,5 nuke 0x02 -> drop
1528 * 7 redir 0x44 -> redir, count as stat2
1529 * * unspec 0x11 -> pass, count as stat0
1531 * [1] We can't support OK and RECLASSIFY because we can't tell TC
1532 * the exact decision made. We are forced to support UNSPEC
1533 * to handle aborts so that's the only one we handle for passing
1534 * packets up the stack.
1536 /* Target for aborts */
1537 nfp_prog
->tgt_abort
= nfp_prog_current_offset(nfp_prog
);
1539 emit_br_def(nfp_prog
, nfp_prog
->tgt_done
, 2);
1541 emit_alu(nfp_prog
, reg_a(0),
1542 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_FLAGS
);
1543 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF
, 16);
1545 /* Target for normal exits */
1546 nfp_prog
->tgt_out
= nfp_prog_current_offset(nfp_prog
);
1548 /* if R0 > 7 jump to abort */
1549 emit_alu(nfp_prog
, reg_none(), reg_imm(7), ALU_OP_SUB
, reg_b(0));
1550 emit_br(nfp_prog
, BR_BLO
, nfp_prog
->tgt_abort
, 0);
1551 emit_alu(nfp_prog
, reg_a(0),
1552 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_FLAGS
);
1554 wrp_immed(nfp_prog
, reg_b(2), 0x41221211);
1555 wrp_immed(nfp_prog
, reg_b(3), 0x41001211);
1557 emit_shf(nfp_prog
, reg_a(1),
1558 reg_none(), SHF_OP_NONE
, reg_b(0), SHF_SC_L_SHF
, 2);
1560 emit_alu(nfp_prog
, reg_none(), reg_a(1), ALU_OP_OR
, reg_imm(0));
1561 emit_shf(nfp_prog
, reg_a(2),
1562 reg_imm(0xf), SHF_OP_AND
, reg_b(2), SHF_SC_R_SHF
, 0);
1564 emit_alu(nfp_prog
, reg_none(), reg_a(1), ALU_OP_OR
, reg_imm(0));
1565 emit_shf(nfp_prog
, reg_b(2),
1566 reg_imm(0xf), SHF_OP_AND
, reg_b(3), SHF_SC_R_SHF
, 0);
1568 emit_br_def(nfp_prog
, nfp_prog
->tgt_done
, 2);
1570 emit_shf(nfp_prog
, reg_b(2),
1571 reg_a(2), SHF_OP_OR
, reg_b(2), SHF_SC_L_SHF
, 4);
1572 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF
, 16);
1575 static void nfp_outro_xdp(struct nfp_prog
*nfp_prog
)
1577 /* XDP return codes:
1578 * 0 aborted 0x82 -> drop, count as stat3
1579 * 1 drop 0x22 -> drop, count as stat1
1580 * 2 pass 0x11 -> pass, count as stat0
1581 * 3 tx 0x44 -> redir, count as stat2
1582 * * unknown 0x82 -> drop, count as stat3
1584 /* Target for aborts */
1585 nfp_prog
->tgt_abort
= nfp_prog_current_offset(nfp_prog
);
1587 emit_br_def(nfp_prog
, nfp_prog
->tgt_done
, 2);
1589 emit_alu(nfp_prog
, reg_a(0),
1590 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_FLAGS
);
1591 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF
, 16);
1593 /* Target for normal exits */
1594 nfp_prog
->tgt_out
= nfp_prog_current_offset(nfp_prog
);
1596 /* if R0 > 3 jump to abort */
1597 emit_alu(nfp_prog
, reg_none(), reg_imm(3), ALU_OP_SUB
, reg_b(0));
1598 emit_br(nfp_prog
, BR_BLO
, nfp_prog
->tgt_abort
, 0);
1600 wrp_immed(nfp_prog
, reg_b(2), 0x44112282);
1602 emit_shf(nfp_prog
, reg_a(1),
1603 reg_none(), SHF_OP_NONE
, reg_b(0), SHF_SC_L_SHF
, 3);
1605 emit_alu(nfp_prog
, reg_none(), reg_a(1), ALU_OP_OR
, reg_imm(0));
1606 emit_shf(nfp_prog
, reg_b(2),
1607 reg_imm(0xff), SHF_OP_AND
, reg_b(2), SHF_SC_R_SHF
, 0);
1609 emit_br_def(nfp_prog
, nfp_prog
->tgt_done
, 2);
1611 emit_alu(nfp_prog
, reg_a(0),
1612 reg_none(), ALU_OP_NONE
, NFP_BPF_ABI_FLAGS
);
1613 emit_ld_field(nfp_prog
, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF
, 16);
1616 static void nfp_outro(struct nfp_prog
*nfp_prog
)
1618 switch (nfp_prog
->act
) {
1620 nfp_outro_tc_da(nfp_prog
);
1622 case NN_ACT_TC_DROP
:
1623 case NN_ACT_TC_REDIR
:
1624 nfp_outro_tc_legacy(nfp_prog
);
1627 nfp_outro_xdp(nfp_prog
);
1632 static int nfp_translate(struct nfp_prog
*nfp_prog
)
1634 struct nfp_insn_meta
*meta
;
1637 nfp_intro(nfp_prog
);
1638 if (nfp_prog
->error
)
1639 return nfp_prog
->error
;
1641 list_for_each_entry(meta
, &nfp_prog
->insns
, l
) {
1642 instr_cb_t cb
= instr_cb
[meta
->insn
.code
];
1644 meta
->off
= nfp_prog_current_offset(nfp_prog
);
1647 nfp_prog
->n_translated
++;
1651 if (nfp_meta_has_prev(nfp_prog
, meta
) &&
1652 nfp_meta_prev(meta
)->double_cb
)
1653 cb
= nfp_meta_prev(meta
)->double_cb
;
1656 err
= cb(nfp_prog
, meta
);
1660 nfp_prog
->n_translated
++;
1663 nfp_outro(nfp_prog
);
1664 if (nfp_prog
->error
)
1665 return nfp_prog
->error
;
1667 return nfp_fixup_branches(nfp_prog
);
1671 nfp_prog_prepare(struct nfp_prog
*nfp_prog
, const struct bpf_insn
*prog
,
1676 for (i
= 0; i
< cnt
; i
++) {
1677 struct nfp_insn_meta
*meta
;
1679 meta
= kzalloc(sizeof(*meta
), GFP_KERNEL
);
1683 meta
->insn
= prog
[i
];
1686 list_add_tail(&meta
->l
, &nfp_prog
->insns
);
1692 /* --- Optimizations --- */
1693 static void nfp_bpf_opt_reg_init(struct nfp_prog
*nfp_prog
)
1695 struct nfp_insn_meta
*meta
;
1697 list_for_each_entry(meta
, &nfp_prog
->insns
, l
) {
1698 struct bpf_insn insn
= meta
->insn
;
1700 /* Programs converted from cBPF start with register xoring */
1701 if (insn
.code
== (BPF_ALU64
| BPF_XOR
| BPF_X
) &&
1702 insn
.src_reg
== insn
.dst_reg
)
1705 /* Programs start with R6 = R1 but we ignore the skb pointer */
1706 if (insn
.code
== (BPF_ALU64
| BPF_MOV
| BPF_X
) &&
1707 insn
.src_reg
== 1 && insn
.dst_reg
== 6)
1710 /* Return as soon as something doesn't match */
1716 /* Try to rename registers so that program uses only low ones */
1717 static int nfp_bpf_opt_reg_rename(struct nfp_prog
*nfp_prog
)
1719 bool reg_used
[MAX_BPF_REG
] = {};
1720 u8 tgt_reg
[MAX_BPF_REG
] = {};
1721 struct nfp_insn_meta
*meta
;
1724 list_for_each_entry(meta
, &nfp_prog
->insns
, l
) {
1728 reg_used
[meta
->insn
.src_reg
] = true;
1729 reg_used
[meta
->insn
.dst_reg
] = true;
1732 for (i
= 0, j
= 0; i
< ARRAY_SIZE(tgt_reg
); i
++) {
1738 nfp_prog
->num_regs
= j
;
1740 list_for_each_entry(meta
, &nfp_prog
->insns
, l
) {
1741 meta
->insn
.src_reg
= tgt_reg
[meta
->insn
.src_reg
];
1742 meta
->insn
.dst_reg
= tgt_reg
[meta
->insn
.dst_reg
];
1748 /* Remove masking after load since our load guarantees this is not needed */
1749 static void nfp_bpf_opt_ld_mask(struct nfp_prog
*nfp_prog
)
1751 struct nfp_insn_meta
*meta1
, *meta2
;
1752 const s32 exp_mask
[] = {
1753 [BPF_B
] = 0x000000ffU
,
1754 [BPF_H
] = 0x0000ffffU
,
1755 [BPF_W
] = 0xffffffffU
,
1758 nfp_for_each_insn_walk2(nfp_prog
, meta1
, meta2
) {
1759 struct bpf_insn insn
, next
;
1764 if (BPF_CLASS(insn
.code
) != BPF_LD
)
1766 if (BPF_MODE(insn
.code
) != BPF_ABS
&&
1767 BPF_MODE(insn
.code
) != BPF_IND
)
1770 if (next
.code
!= (BPF_ALU64
| BPF_AND
| BPF_K
))
1773 if (!exp_mask
[BPF_SIZE(insn
.code
)])
1775 if (exp_mask
[BPF_SIZE(insn
.code
)] != next
.imm
)
1778 if (next
.src_reg
|| next
.dst_reg
)
1785 static void nfp_bpf_opt_ld_shift(struct nfp_prog
*nfp_prog
)
1787 struct nfp_insn_meta
*meta1
, *meta2
, *meta3
;
1789 nfp_for_each_insn_walk3(nfp_prog
, meta1
, meta2
, meta3
) {
1790 struct bpf_insn insn
, next1
, next2
;
1793 next1
= meta2
->insn
;
1794 next2
= meta3
->insn
;
1796 if (BPF_CLASS(insn
.code
) != BPF_LD
)
1798 if (BPF_MODE(insn
.code
) != BPF_ABS
&&
1799 BPF_MODE(insn
.code
) != BPF_IND
)
1801 if (BPF_SIZE(insn
.code
) != BPF_W
)
1804 if (!(next1
.code
== (BPF_LSH
| BPF_K
| BPF_ALU64
) &&
1805 next2
.code
== (BPF_RSH
| BPF_K
| BPF_ALU64
)) &&
1806 !(next1
.code
== (BPF_RSH
| BPF_K
| BPF_ALU64
) &&
1807 next2
.code
== (BPF_LSH
| BPF_K
| BPF_ALU64
)))
1810 if (next1
.src_reg
|| next1
.dst_reg
||
1811 next2
.src_reg
|| next2
.dst_reg
)
1814 if (next1
.imm
!= 0x20 || next2
.imm
!= 0x20)
/* Run all pre-translation optimization passes in order; only the
 * register-rename pass can fail.
 */
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	int ret;

	nfp_bpf_opt_reg_init(nfp_prog);

	ret = nfp_bpf_opt_reg_rename(nfp_prog);
	if (ret)
		return ret;

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}
1839 * nfp_bpf_jit() - translate BPF code into NFP assembly
1840 * @filter: kernel BPF filter struct
1841 * @prog_mem: memory to store assembler instructions
1842 * @act: action attached to this eBPF program
1843 * @prog_start: offset of the first instruction when loaded
1844 * @prog_done: where to jump on exit
1845 * @prog_sz: size of @prog_mem in instructions
1846 * @res: achieved parameters of translation results
1849 nfp_bpf_jit(struct bpf_prog
*filter
, void *prog_mem
,
1850 enum nfp_bpf_action_type act
,
1851 unsigned int prog_start
, unsigned int prog_done
,
1852 unsigned int prog_sz
, struct nfp_bpf_result
*res
)
1854 struct nfp_prog
*nfp_prog
;
1857 nfp_prog
= kzalloc(sizeof(*nfp_prog
), GFP_KERNEL
);
1861 INIT_LIST_HEAD(&nfp_prog
->insns
);
1862 nfp_prog
->act
= act
;
1863 nfp_prog
->start_off
= prog_start
;
1864 nfp_prog
->tgt_done
= prog_done
;
1866 ret
= nfp_prog_prepare(nfp_prog
, filter
->insnsi
, filter
->len
);
1870 ret
= nfp_prog_verify(nfp_prog
, filter
);
1874 ret
= nfp_bpf_optimize(nfp_prog
);
1878 if (nfp_prog
->num_regs
<= 7)
1879 nfp_prog
->regs_per_thread
= 16;
1881 nfp_prog
->regs_per_thread
= 32;
1883 nfp_prog
->prog
= prog_mem
;
1884 nfp_prog
->__prog_alloc_len
= prog_sz
;
1886 ret
= nfp_translate(nfp_prog
);
1888 pr_err("Translation failed with error %d (translated: %u)\n",
1889 ret
, nfp_prog
->n_translated
);
1893 res
->n_instr
= nfp_prog
->prog_len
;
1894 res
->dense_mode
= nfp_prog
->num_regs
<= 7;
1896 nfp_prog_free(nfp_prog
);