/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/pkt_cls.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
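
/* Usage sketch (illustrative, not part of the original file): walking
 * instruction pairs to peephole-match adjacent instructions, as the
 * optimization passes at the bottom of this file do:
 *
 *	struct nfp_insn_meta *meta1, *meta2;
 *
 *	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
 *		(inspect meta1->insn and meta2->insn here, and
 *		 possibly set meta2->skip = true)
 *	}
 */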
static bool
nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.next != &nfp_prog->insns;
}

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->start_off + nfp_prog->prog_len;
}

static unsigned int
nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
{
	return offset - nfp_prog->start_off;
}
/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	 u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
{
	if (defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}
	__emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);
}
static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
	       u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BBYTE_BASE |
		FIELD_PREP(OP_BB_A_SRC, areg) |
		FIELD_PREP(OP_BB_BYTE, byte) |
		FIELD_PREP(OP_BB_B_SRC, breg) |
		FIELD_PREP(OP_BB_I8, imm8) |
		FIELD_PREP(OP_BB_EQ, equal) |
		FIELD_PREP(OP_BB_DEFBR, defer) |
		FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
		 swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
		       defer, reg.src_lmextn);
}
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
		     invert, shift, reg.wr_both,
		     reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}
/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}
static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
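
/* Worked examples for the packing above (derived from the masks in
 * pack_immed(); added for illustration):
 *   imm = 0x00001234 -> one immed, val 0x1234, IMMED_SHIFT_0B
 *   imm = 0x12340000 -> one immed, val 0x1234, IMMED_SHIFT_2B
 *   imm = 0xffffff00 -> ~imm = 0x000000ff packs, so one inverted immed
 *   imm = 0x12345678 -> no single packing, falls back to the two-insn
 *                       sequence (low 16 bits, then the upper word)
 */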
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg
ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}
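
/* Example (illustrative): callers pass one of the scratch registers so the
 * helper can fall back to it when @imm does not fit in the operand, e.g.
 *
 *	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
 *	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
 *
 * which is the pattern wrp_alu_imm() below uses.
 */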
/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If the @imm is small enough encode it directly in operand and return
 * otherwise load @imm to a spare register and return its encoding.
 */
static swreg
re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
	       enum br_special special)
{
	emit_br(nfp_prog, mask, 0, 0);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_BR_SPECIAL, special);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
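
/* Worked example (illustrative): a 2-byte load sets sz = 4, shift = 2, so
 * the CMD reads 4 bytes and the value lands in the top 16 bits of the
 * xfer register; the SHF then shifts right by shift * 8 = 16 bits to
 * leave the big-endian value right-aligned in dst_gpr.
 */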
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
		   u8 dst_gpr, int size)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in @offset and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
		 reg_a(src_gpr), offset, sz / 4 - 1, true);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
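
/* Worked example (illustrative): a 2-byte load sets mask = GENMASK(1, 0)
 * = 0x3, a byte mask selecting the two low bytes, so the ld_field copies
 * only those bytes of the little-endian xfer register into dst_gpr and
 * zero-fills the rest (the "zero" flag passed to emit_ld_field_any()).
 */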
static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}
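
/* Illustrative reading of the bounds check above: after the two ALU adds,
 * imm_a holds src + offset + size; subtracting it from the packet length
 * sets the condition codes, and the BLO special branch aborts when
 * src + offset + size exceeds the packet length.
 */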
static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NEG, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}
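
/* The short-circuits above encode ALU identities (noted for clarity):
 *   dst &= 0   -> load 0;       dst &= ~0U -> no-op
 *   dst |= ~0U -> load ~0U;     dst |= 0   -> no-op
 *   dst ^= ~0U -> NEG (bitwise not);  dst ^= 0 -> no-op
 * Anything else loads @imm (directly or via a tmp reg) and emits one
 * ALU instruction.
 */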
static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}
static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static int
wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 areg, breg;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}
static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}
static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, 32 - insn->imm);
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_L_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}
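
/* Worked example (illustrative): a 64-bit shl by 12 becomes
 *   dst_hi = double-shift-right(dst_hi:dst_lo) by 32 - 12 = 20 bits
 *   dst_lo = dst_lo << 12
 * i.e. the double shift pulls the 12 bits that cross the register
 * boundary from the low word into the high word.
 */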
static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	if (insn->imm < 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
			 SHF_SC_R_DSHF, insn->imm);
		emit_shf(nfp_prog, reg_both(dst + 1),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm);
	} else if (insn->imm == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (insn->imm > 32) {
		emit_shf(nfp_prog, reg_both(dst),
			 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
			 SHF_SC_R_SHF, insn->imm - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}
static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}
static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
}

static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}
static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}
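
/* Illustrative trace of the 16-bit case above, with gpr bytes 3..0 being
 * b3 b2 b1 b0: the first ld_field (bmask 0x9, rotate right 8) yields
 * b0 b2 b1 b1, the second (bmask 0xe, shift right 16) then yields
 * 00 00 b0 b1 -- the low 16 bits byte-swapped, upper half zeroed.
 */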
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
		  meta->insn.imm);

	return 0;
}

static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	meta->double_cb = imm_ld8_part2;
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);

	return 0;
}

static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 1);
}

static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 2);
}

static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}

static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}

static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}

static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}
static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	switch (meta->insn.off) {
	case offsetof(struct sk_buff, len):
		if (size != FIELD_SIZEOF(struct sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog,
			reg_both(meta->insn.dst_reg * 2), plen_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	if (size != sizeof(void *))
		return -EINVAL;

	switch (meta->insn.off) {
	case offsetof(struct xdp_buff, data):
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_buff, data_end):
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
				  meta->insn.dst_reg * 2, size);
}
static int
mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	unsigned int size)
{
	if (meta->ptr.type == PTR_TO_CTX) {
		if (nfp_prog->act == NN_ACT_XDP)
			return mem_ldx_xdp(nfp_prog, meta, size);
		else
			return mem_ldx_skb(nfp_prog, meta, size);
	}

	if (meta->ptr.type == PTR_TO_PACKET)
		return mem_ldx_data(nfp_prog, meta, size);

	return -EOPNOTSUPP;
}

static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}

static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}

static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}

static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}

static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return -EOPNOTSUPP;
}

static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return -EOPNOTSUPP;
}

static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->ptr.type == PTR_TO_PACKET)
		return -EOPNOTSUPP;

	if (nfp_prog->act == NN_ACT_XDP)
		return mem_stx4_xdp(nfp_prog, meta);
	return mem_stx4_skb(nfp_prog, meta);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off < 0) /* TODO */
		return -EOPNOTSUPP;
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}
static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}

static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}

static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}

static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		meta->skip = true;
		return 0;
	}

	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}

static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}
static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}

static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}

static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}

static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}

static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}

static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}

static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}

static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);

	return 0;
}
static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};
/* --- Misc code --- */
static void br_set_offset(u64 *instr, u16 offset)
{
	u16 addr_lo, addr_hi;

	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = offset != addr_lo;
	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}
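
/* Example (illustrative): when the branch target fits entirely in
 * OP_BR_ADDR_LO, addr_hi stays 0; otherwise addr_hi is set to 1 and the
 * low field keeps only the in-range bits, mirroring how __emit_br()
 * encodes freshly emitted branches above.
 */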
/* --- Assembler logic --- */
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *next;
	u32 off, br_idx;
	u32 idx;

	nfp_for_each_insn_walk2(nfp_prog, meta, next) {
		if (meta->skip)
			continue;
		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
			continue;

		br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
			return -ELOOP;
		}
		/* Leave special branches for later */
		if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
			continue;

		/* Find the target offset in assembler realm */
		off = meta->insn.off;
		if (!off) {
			pr_err("Fixup found zero offset!!\n");
			return -ELOOP;
		}

		while (off && nfp_meta_has_next(nfp_prog, next)) {
			next = nfp_meta_next(next);
			off--;
		}
		if (off) {
			pr_err("Fixup found too large jump!! %d\n", off);
			return -ELOOP;
		}

		if (next->skip) {
			pr_err("Branch landing on removed instruction!!\n");
			return -ELOOP;
		}

		for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
		     idx <= br_idx; idx++) {
			if (!nfp_is_br(nfp_prog->prog[idx]))
				continue;
			br_set_offset(&nfp_prog->prog[idx], next->off);
		}
	}

	/* Fixup 'goto out's separately, they can be scattered around */
	for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
		enum br_special special;

		if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
			continue;

		special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
		switch (special) {
		case OP_BR_NORMAL:
			break;
		case OP_BR_GO_OUT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_out);
			break;
		case OP_BR_GO_ABORT:
			br_set_offset(&nfp_prog->prog[br_idx],
				      nfp_prog->tgt_abort);
			break;
		}

		nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
	}

	return 0;
}
static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
{
	const u8 act2code[] = {
		[NN_ACT_TC_DROP]  = 0x22,
		[NN_ACT_TC_REDIR] = 0x24
	};
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
	wrp_immed(nfp_prog, reg_both(0), 0);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
	/* Legacy TC mode:
	 *   0        0x11 -> pass,  count as stat0
	 *  -1  drop  0x22 -> drop,  count as stat1
	 *     redir  0x24 -> redir, count as stat1
	 *  ife mark  0x21 -> pass,  count as stat1
	 *  ife + tx  0x24 -> redir, count as stat1
	 */
	emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
		      SHF_SC_L_SHF, 16);
}
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1   ok        NOT SUPPORTED[1]
	 *   2   drop  0x22 -> drop,  count as stat1
	 *   4,5 nuke  0x02 -> drop
	 *   7  redir  0x44 -> redir, count as stat2
	 *   * unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0 aborted  0x82 -> drop,  count as stat3
	 *   1    drop  0x22 -> drop,  count as stat1
	 *   2    pass  0x11 -> pass,  count as stat0
	 *   3      tx  0x44 -> redir, count as stat2
	 *   * unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->act) {
	case NN_ACT_DIRECT:
		nfp_outro_tc_da(nfp_prog);
		break;
	case NN_ACT_TC_DROP:
	case NN_ACT_TC_REDIR:
		nfp_outro_tc_legacy(nfp_prog);
		break;
	case NN_ACT_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	}
}
static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int i, err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++)
		emit_nop(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		struct nfp_insn_meta *meta;

		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	return 0;
}
/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			continue;

		/* Return as soon as something doesn't match */
		return;
	}
}
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		meta2->skip = true;
	}
}
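
/* Example of a sequence this pass rewrites (illustrative):
 *
 *	r0 = *(u8 *)skb[off]	(BPF_LD | BPF_ABS | BPF_B)
 *	r0 &= 0xff		(BPF_ALU64 | BPF_AND | BPF_K)
 *
 * The AND is marked skip because data_ld() already zero-extends the
 * loaded byte, so the mask is a no-op.
 */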
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}
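
/* Example of a sequence this pass rewrites (illustrative):
 *
 *	r0 = *(u32 *)skb[off]	(BPF_LD | BPF_ABS | BPF_W)
 *	r0 <<= 32
 *	r0 >>= 32
 *
 * The shift pair only truncates r0 to 32 bits, which the load already
 * guarantees, so both shifts are marked skip.
 */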
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}

static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
{
	int i;

	for (i = 0; i < nfp_prog->prog_len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
		if (err)
			return err;

		nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);

		ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
	}

	return 0;
}
/**
 * nfp_bpf_jit() - translate BPF code into NFP assembly
 * @filter:	kernel BPF filter struct
 * @prog_mem:	memory to store assembler instructions
 * @act:	action attached to this eBPF program
 * @prog_start:	offset of the first instruction when loaded
 * @prog_done:	where to jump on exit
 * @prog_sz:	size of @prog_mem in instructions
 * @res:	achieved parameters of translation results
 */
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
	    enum nfp_bpf_action_type act,
	    unsigned int prog_start, unsigned int prog_done,
	    unsigned int prog_sz, struct nfp_bpf_result *res)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->act = act;
	nfp_prog->start_off = prog_start;
	nfp_prog->tgt_done = prog_done;

	ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
	if (ret)
		goto out;

	ret = nfp_prog_verify(nfp_prog, filter);
	if (ret)
		goto out;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		goto out;

	nfp_prog->num_regs = MAX_BPF_REG;
	nfp_prog->regs_per_thread = 32;

	nfp_prog->prog = prog_mem;
	nfp_prog->__prog_alloc_len = prog_sz;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		ret = -EINVAL;
		goto out;
	}

	ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);

	res->n_instr = nfp_prog->prog_len;
	res->dense_mode = false;
out:
	nfp_prog_free(nfp_prog);

	return ret;
}