]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/ethernet/netronome/nfp/bpf/jit.c
975d63fbc1d534ac4aa3ec544140401e8fc3698e
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / netronome / nfp / bpf / jit.c
1 /*
2 * Copyright (C) 2016 Netronome Systems, Inc.
3 *
 4  * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 #define pr_fmt(fmt) "NFP net bpf: " fmt
35
36 #include <linux/kernel.h>
37 #include <linux/bpf.h>
38 #include <linux/filter.h>
39 #include <linux/pkt_cls.h>
40 #include <linux/unistd.h>
41
42 #include "main.h"
43 #include "../nfp_asm.h"
44
45 /* --- NFP prog --- */
46 /* Foreach "multiple" entries macros provide pos and next<n> pointers.
47 * It's safe to modify the next pointers (but not pos).
48 */
/* Walk the instruction list with a two entry window; @pos and @next are
 * adjacent entries.  The loop stops as soon as either pointer reaches the
 * list head, so the body always sees two valid entries.
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

/* Like nfp_for_each_insn_walk2() but with a three entry window. */
#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))
67
68 static bool
69 nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
70 {
71 return meta->l.next != &nfp_prog->insns;
72 }
73
74 static bool
75 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
76 {
77 return meta->l.prev != &nfp_prog->insns;
78 }
79
80 static void nfp_prog_free(struct nfp_prog *nfp_prog)
81 {
82 struct nfp_insn_meta *meta, *tmp;
83
84 list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
85 list_del(&meta->l);
86 kfree(meta);
87 }
88 kfree(nfp_prog);
89 }
90
91 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
92 {
93 if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
94 nfp_prog->error = -ENOSPC;
95 return;
96 }
97
98 nfp_prog->prog[nfp_prog->prog_len] = insn;
99 nfp_prog->prog_len++;
100 }
101
/* Absolute offset of the next instruction to be emitted:
 * program load offset plus the number of words pushed so far.
 */
static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->start_off + nfp_prog->prog_len;
}
106
/* Convert an absolute instruction offset back into an index into prog[]. */
static unsigned int
nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
{
	return offset - nfp_prog->start_off;
}
112
113 /* --- Emitters --- */
/* Encode a memory command instruction and append it to the program.
 * @op indexes cmd_tgt_act[] for the target/token pair, @areg/@breg are
 * already-encoded restricted operands, @size goes into the count field
 * and @sync selects a context swap (with signal) until completion.
 */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
{
	enum cmd_ctx_swap ctx;
	u64 insn;

	/* Synchronous commands swap the context out while they run. */
	if (sync)
		ctx = CMD_CTX_SWAP;
	else
		ctx = CMD_CTX_NO_SWAP;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, sync) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}
138
/* Translate swreg operands and emit a command instruction.
 * Command operands are restricted and may not be swapped or use the
 * LM extension bits; hitting either case is a translator bug (-EFAULT).
 */
static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	 u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
164
/* Encode a branch instruction to absolute address @addr with up to
 * @defer branch-defer slots.  The low bits of @addr go into ADDR_LO;
 * ADDR_HI is a single flag set whenever @addr has bits above ADDR_LO.
 */
static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}
185
186 static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
187 {
188 if (defer > 2) {
189 pr_err("BUG: branch defer out of bounds %d\n", defer);
190 nfp_prog->error = -EFAULT;
191 return;
192 }
193 __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
194 }
195
196 static void
197 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
198 {
199 __emit_br(nfp_prog, mask,
200 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
201 BR_CSS_NONE, addr, defer);
202 }
203
/* Encode a branch-on-byte-compare instruction: compare byte @byte of the
 * A operand against the B operand (or an 8 bit immediate when @imm8) and
 * branch to @addr on equality/inequality per @equal.  Address is split
 * into ADDR_LO plus a single ADDR_HI overflow flag, as in __emit_br().
 */
static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
	       u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BBYTE_BASE |
		FIELD_PREP(OP_BB_A_SRC, areg) |
		FIELD_PREP(OP_BB_BYTE, byte) |
		FIELD_PREP(OP_BB_B_SRC, breg) |
		FIELD_PREP(OP_BB_I8, imm8) |
		FIELD_PREP(OP_BB_EQ, equal) |
		FIELD_PREP(OP_BB_DEFBR, defer) |
		FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
227
228 static void
229 emit_br_byte_neq(struct nfp_prog *nfp_prog,
230 swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
231 {
232 struct nfp_insn_re_regs reg;
233 int err;
234
235 err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
236 if (err) {
237 nfp_prog->error = err;
238 return;
239 }
240
241 __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
242 defer, reg.src_lmextn);
243 }
244
/* Encode an immed instruction.  @imm_hi carries the upper bits of the
 * immediate; the low byte is expected to already be encoded in the
 * operands (see emit_immed()).  @shift scales the immediate by whole
 * bytes and @invert writes its bitwise complement.
 */
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
266
/* Load a 16 bit immediate into @dst.  The low byte of @imm travels as
 * an immediate B operand, the remaining bits go into the instruction's
 * IMM field.  @dst must be a real register: an immediate destination is
 * nonsensical and rejected with -EFAULT.
 */
static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	/* @dst doubles as the A operand; B carries the low immediate byte. */
	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
		     invert, shift, reg.wr_both,
		     reg.dst_lmextn, reg.src_lmextn);
}
289
/* Encode a shift instruction.  @shift must fit the shift field as-is,
 * otherwise the translation fails with -EFAULT.  For left shifts the
 * amount is encoded as 32 - shift (NOTE(review): presumably because the
 * hardware implements them via a right double-shift -- confirm against
 * the NFP ISA; a @shift of 0 with SHF_SC_L_SHF would encode as 32).
 */
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
322
323 static void
324 emit_shf(struct nfp_prog *nfp_prog, swreg dst,
325 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
326 {
327 struct nfp_insn_re_regs reg;
328 int err;
329
330 err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
331 if (err) {
332 nfp_prog->error = err;
333 return;
334 }
335
336 __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
337 reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
338 reg.dst_lmextn, reg.src_lmextn);
339 }
340
/* Encode an ALU instruction with pre-encoded unrestricted operands. */
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
361
362 static void
363 emit_alu(struct nfp_prog *nfp_prog, swreg dst,
364 swreg lreg, enum alu_op op, swreg rreg)
365 {
366 struct nfp_insn_ur_regs reg;
367 int err;
368
369 err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
370 if (err) {
371 nfp_prog->error = err;
372 return;
373 }
374
375 __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
376 reg.areg, op, reg.breg, reg.swap, reg.wr_both,
377 reg.dst_lmextn, reg.src_lmextn);
378 }
379
/* Encode a load-field instruction: bytes of the B operand selected by
 * @bmask are merged into the A operand (which is also the destination),
 * optionally shifted per @sc/@shift and zero-filled when @zero.
 */
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}
403
404 static void
405 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
406 enum shf_sc sc, u8 shift, bool zero)
407 {
408 struct nfp_insn_re_regs reg;
409 int err;
410
411 /* Note: ld_field is special as it uses one of the src regs as dst */
412 err = swreg_to_restricted(dst, dst, src, &reg, true);
413 if (err) {
414 nfp_prog->error = err;
415 return;
416 }
417
418 __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
419 reg.i8, zero, reg.swap, reg.wr_both,
420 reg.dst_lmextn, reg.src_lmextn);
421 }
422
/* Load-field without zero-fill: bytes not selected by @bmask keep their
 * previous value in @dst.
 */
static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}
429
/* No-op: an immed instruction with immediate-only operands and all
 * other fields zero, so it has no architecturally visible effect.
 */
static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}
434
435 /* --- Wrappers --- */
436 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
437 {
438 if (!(imm & 0xffff0000)) {
439 *val = imm;
440 *shift = IMMED_SHIFT_0B;
441 } else if (!(imm & 0xff0000ff)) {
442 *val = imm >> 8;
443 *shift = IMMED_SHIFT_1B;
444 } else if (!(imm & 0x0000ffff)) {
445 *val = imm >> 16;
446 *shift = IMMED_SHIFT_2B;
447 } else {
448 return false;
449 }
450
451 return true;
452 }
453
454 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
455 {
456 enum immed_shift shift;
457 u16 val;
458
459 if (pack_immed(imm, &val, &shift)) {
460 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
461 } else if (pack_immed(~imm, &val, &shift)) {
462 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
463 } else {
464 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
465 false, IMMED_SHIFT_0B);
466 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
467 false, IMMED_SHIFT_2B);
468 }
469 }
470
471 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
472 * If the @imm is small enough encode it directly in operand and return
473 * otherwise load @imm to a spare register and return its encoding.
474 */
475 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
476 {
477 if (FIELD_FIT(UR_REG_IMM_MAX, imm))
478 return reg_imm(imm);
479
480 wrp_immed(nfp_prog, tmp_reg, imm);
481 return tmp_reg;
482 }
483
484 /* re_load_imm_any() - encode immediate or use tmp register (restricted)
485 * If the @imm is small enough encode it directly in operand and return
486 * otherwise load @imm to a spare register and return its encoding.
487 */
488 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
489 {
490 if (FIELD_FIT(RE_REG_IMM_MAX, imm))
491 return reg_imm(imm);
492
493 wrp_immed(nfp_prog, tmp_reg, imm);
494 return tmp_reg;
495 }
496
/* Emit a branch with address 0 and tag it with @special by OR-ing the
 * OP_BR_SPECIAL field into the word just pushed; presumably a later
 * pass resolves the real target (not visible in this chunk).
 * NOTE(review): patches prog[prog_len - 1] directly -- relies on
 * emit_br() having pushed a word (on -ENOSPC prog_len is unchanged);
 * the error case is caught later via nfp_prog->error.
 */
static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
	       enum br_special special)
{
	emit_br(nfp_prog, mask, 0, 0);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_BR_SPECIAL, special);
}
506
/* Register move implemented as an ALU pass-through of @src into @dst. */
static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}
511
/* GPR-to-GPR move: read @src on the B bus, destination via reg_both(). */
static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}
516
/* Read @size bytes of packet data at @offset into GPR @dst_gpr,
 * zero-extending the result to 64 bit (callers pass size 1, 2 or 4).
 */
static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	/* sz - 1: the count field appears to be zero-based -- matches
	 * data_ld_host_order() below.
	 */
	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, true);

	i = 0;
	if (shift)
		/* Sub-word load: shift the unwanted low bytes away. */
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	/* Zero the high word unless two full words were copied. */
	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
545
/* Read @size bytes from the address in GPR @src_gpr plus @offset into
 * GPR @dst_gpr, byte-swapped to host order and zero-extended to 64 bit.
 */
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
		   u8 dst_gpr, int size)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in @offset and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	/* Byte-select mask for the ld_field merge on sub-word loads. */
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
		 reg_a(src_gpr), offset, sz / 4 - 1, true);

	i = 0;
	if (mask)
		/* Sub-word load: pull only the wanted bytes, zero-fill. */
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	/* Zero the high word unless two full words were copied. */
	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}
575
/* Indirect packet load: read @size bytes at @src register + @offset.
 * Emits a bounds check against the packet length which aborts the
 * program on overflow, then performs the actual load.
 */
static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	/* Borrow (end beyond packet) -> abort the program. */
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}
595
/* Absolute packet load: read @size bytes at fixed @offset, with a
 * bounds check against the packet length that aborts on overflow.
 */
static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}
609
/* Emit @dst = @dst <alu_op> @imm, exploiting algebraic identities:
 * AND 0 clears, AND ~0 / OR 0 / XOR 0 are no-ops, OR ~0 sets all bits
 * and XOR ~0 is emitted as a NEG.  Only the non-trivial cases load the
 * immediate and emit the actual ALU operation.
 */
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NEG, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}
638
639 static int
640 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
641 enum alu_op alu_op, bool skip)
642 {
643 const struct bpf_insn *insn = &meta->insn;
644 u64 imm = insn->imm; /* sign extend */
645
646 if (skip) {
647 meta->skip = true;
648 return 0;
649 }
650
651 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
652 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
653
654 return 0;
655 }
656
657 static int
658 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
659 enum alu_op alu_op)
660 {
661 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
662
663 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
664 emit_alu(nfp_prog, reg_both(dst + 1),
665 reg_a(dst + 1), alu_op, reg_b(src + 1));
666
667 return 0;
668 }
669
670 static int
671 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
672 enum alu_op alu_op, bool skip)
673 {
674 const struct bpf_insn *insn = &meta->insn;
675
676 if (skip) {
677 meta->skip = true;
678 return 0;
679 }
680
681 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
682 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
683
684 return 0;
685 }
686
687 static int
688 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
689 enum alu_op alu_op)
690 {
691 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
692
693 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
694 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
695
696 return 0;
697 }
698
/* Combine @dst and @src with @alu_op, discard the result and branch to
 * @off based on the resulting condition codes.
 */
static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}
706
/* 64 bit register test-and-branch, done as two 32 bit tests (each half
 * gets its own branch).  Backward branches are not handled yet.
 */
static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}
723
/* 64 bit compare against a sign-extended immediate, then a conditional
 * branch.  The compare is a discarded SUB/SUB-with-carry pair; @swap
 * reverses the operand order.  Backward branches are not handled yet.
 */
static int
wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	    enum br_mask br_mask, bool swap)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	/* Low halves: plain subtract sets up the carry. */
	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));

	/* High halves: subtract with carry; only flags are kept. */
	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));

	emit_br(nfp_prog, br_mask, insn->off, 0);

	return 0;
}
754
755 static int
756 wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
757 enum br_mask br_mask, bool swap)
758 {
759 const struct bpf_insn *insn = &meta->insn;
760 u8 areg, breg;
761
762 areg = insn->dst_reg * 2;
763 breg = insn->src_reg * 2;
764
765 if (insn->off < 0) /* TODO */
766 return -EOPNOTSUPP;
767
768 if (swap) {
769 areg ^= breg;
770 breg ^= areg;
771 areg ^= breg;
772 }
773
774 emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
775 emit_alu(nfp_prog, reg_none(),
776 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
777 emit_br(nfp_prog, br_mask, insn->off, 0);
778
779 return 0;
780 }
781
/* 32 bit byte swap of @reg_in into GPR @gpr_out, built from two
 * rotate-and-merge ld_field instructions (masks 0xf then 0x5).
 */
static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
789
790 /* --- Callbacks --- */
791 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
792 {
793 const struct bpf_insn *insn = &meta->insn;
794
795 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
796 wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
797
798 return 0;
799 }
800
801 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
802 {
803 u64 imm = meta->insn.imm; /* sign extend */
804
805 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
806 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
807
808 return 0;
809 }
810
/* dst ^= src, 64 bit. */
static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

/* dst ^= imm, 64 bit; XOR with 0 is an identity, skip the insn. */
static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

/* dst &= src, 64 bit. */
static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

/* dst &= imm, 64 bit; AND with all-ones is an identity, skip the insn. */
static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

/* dst |= src, 64 bit. */
static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

/* dst |= imm, 64 bit; OR with 0 is an identity, skip the insn. */
static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}
840
/* dst += src, 64 bit: ADD on the low halves, add-with-carry on the
 * high halves.
 */
static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}
854
855 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
856 {
857 const struct bpf_insn *insn = &meta->insn;
858 u64 imm = insn->imm; /* sign extend */
859
860 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
861 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
862
863 return 0;
864 }
865
/* dst -= src, 64 bit: SUB on the low halves, subtract-with-carry on
 * the high halves.
 */
static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}
879
880 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
881 {
882 const struct bpf_insn *insn = &meta->insn;
883 u64 imm = insn->imm; /* sign extend */
884
885 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
886 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
887
888 return 0;
889 }
890
891 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
892 {
893 const struct bpf_insn *insn = &meta->insn;
894 u8 dst = insn->dst_reg * 2;
895
896 if (insn->imm < 32) {
897 emit_shf(nfp_prog, reg_both(dst + 1),
898 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
899 SHF_SC_R_DSHF, 32 - insn->imm);
900 emit_shf(nfp_prog, reg_both(dst),
901 reg_none(), SHF_OP_NONE, reg_b(dst),
902 SHF_SC_L_SHF, insn->imm);
903 } else if (insn->imm == 32) {
904 wrp_reg_mov(nfp_prog, dst + 1, dst);
905 wrp_immed(nfp_prog, reg_both(dst), 0);
906 } else if (insn->imm > 32) {
907 emit_shf(nfp_prog, reg_both(dst + 1),
908 reg_none(), SHF_OP_NONE, reg_b(dst),
909 SHF_SC_L_SHF, insn->imm - 32);
910 wrp_immed(nfp_prog, reg_both(dst), 0);
911 }
912
913 return 0;
914 }
915
916 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
917 {
918 const struct bpf_insn *insn = &meta->insn;
919 u8 dst = insn->dst_reg * 2;
920
921 if (insn->imm < 32) {
922 emit_shf(nfp_prog, reg_both(dst),
923 reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
924 SHF_SC_R_DSHF, insn->imm);
925 emit_shf(nfp_prog, reg_both(dst + 1),
926 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
927 SHF_SC_R_SHF, insn->imm);
928 } else if (insn->imm == 32) {
929 wrp_reg_mov(nfp_prog, dst, dst + 1);
930 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
931 } else if (insn->imm > 32) {
932 emit_shf(nfp_prog, reg_both(dst),
933 reg_none(), SHF_OP_NONE, reg_b(dst + 1),
934 SHF_SC_R_SHF, insn->imm - 32);
935 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
936 }
937
938 return 0;
939 }
940
941 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
942 {
943 const struct bpf_insn *insn = &meta->insn;
944
945 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
946 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
947
948 return 0;
949 }
950
951 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
952 {
953 const struct bpf_insn *insn = &meta->insn;
954
955 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
956 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
957
958 return 0;
959 }
960
/* dst ^= src, 32 bit. */
static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
}
965
966 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
967 {
968 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
969 }
970
/* dst &= src, 32 bit. */
static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
}

/* dst &= imm, 32 bit; AND with all-ones is an identity, skip the insn. */
static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

/* dst |= src, 32 bit. */
static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
}

/* dst |= imm, 32 bit; OR with 0 is an identity, skip the insn. */
static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

/* dst += src, 32 bit. */
static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
}

/* dst += imm, 32 bit; add of 0 is an identity, skip the insn. */
static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
}

/* dst -= src, 32 bit. */
static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
}

/* dst -= imm, 32 bit; subtract of 0 is an identity, skip the insn. */
static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
1010
/* dst <<= imm, 32 bit; the high word is zeroed.  A zero shift amount
 * denotes an indirect shift which is not implemented yet.
 */
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (!insn->imm)
		return 1; /* TODO: zero shift means indirect */

	emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
		 SHF_SC_L_SHF, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}
1025
/* Endianness conversion (BPF_END) of the destination register for
 * widths of 16, 32 and 64 bits.
 */
static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 gpr = insn->dst_reg * 2;

	switch (insn->imm) {
	case 16:
		/* Swap the two low bytes via rotate+merge ld_fields and
		 * clear everything above them; then zero the high word.
		 */
		emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
			      SHF_SC_R_ROT, 8);
		emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
			      SHF_SC_R_SHF, 16);

		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 32:
		wrp_end32(nfp_prog, reg_a(gpr), gpr);
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
		break;
	case 64:
		/* Stash the high word, then byte swap each word while
		 * exchanging the halves.
		 */
		wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));

		wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
		wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
		break;
	}

	return 0;
}
1054
/* Second half of a two-slot 64 bit immediate load: write the high word
 * of the destination recorded by the previous instruction.
 */
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
		  meta->insn.imm);

	return 0;
}
1062
/* First half of a two-slot 64 bit immediate load (BPF_LD | BPF_IMM |
 * BPF_DW): write the low word now and schedule imm_ld8_part2() for the
 * following instruction slot via the double_cb hook.
 */
static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	meta->double_cb = imm_ld8_part2;
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);

	return 0;
}
1072
1073 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1074 {
1075 return construct_data_ld(nfp_prog, meta->insn.imm, 1);
1076 }
1077
1078 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1079 {
1080 return construct_data_ld(nfp_prog, meta->insn.imm, 2);
1081 }
1082
/* BPF_LD | BPF_ABS | BPF_W: 4-byte direct packet load at constant offset. */
static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ld(nfp_prog, meta->insn.imm, 4);
}
1087
/* BPF_LD | BPF_IND | BPF_B: 1-byte packet load at src-register + imm offset. */
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 1);
}
1093
/* BPF_LD | BPF_IND | BPF_H: 2-byte packet load at src-register + imm offset. */
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 2);
}
1099
/* BPF_LD | BPF_IND | BPF_W: 4-byte packet load at src-register + imm offset. */
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return construct_data_ind_ld(nfp_prog, meta->insn.imm,
				     meta->insn.src_reg * 2, 4);
}
1105
/* Context load in TC mode (PTR_TO_CTX backed by struct sk_buff).
 * Only skb->len is supported; its value is maintained in plen_reg
 * (set up by nfp_intro()).  The high word of dst is zeroed since the
 * field is 32-bit.
 */
static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	switch (meta->insn.off) {
	case offsetof(struct sk_buff, len):
		if (size != FIELD_SIZEOF(struct sk_buff, len))
			return -EOPNOTSUPP;
		wrp_mov(nfp_prog,
			reg_both(meta->insn.dst_reg * 2), plen_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
1124
/* Context load in XDP mode (PTR_TO_CTX backed by struct xdp_buff).
 * data comes from the packet pointer register; data_end is computed as
 * packet pointer + packet length.  Loads must be pointer-sized.
 */
static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		       u8 size)
{
	swreg dst = reg_both(meta->insn.dst_reg * 2);

	if (size != sizeof(void *))
		return -EINVAL;

	switch (meta->insn.off) {
	case offsetof(struct xdp_buff, data):
		wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
		break;
	case offsetof(struct xdp_buff, data_end):
		emit_alu(nfp_prog, dst,
			 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Pointers are kept in the low word only; zero the high word. */
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}
1149
/* Packet-data load (PTR_TO_PACKET): read @size bytes at src register +
 * constant insn offset into dst, in host byte order.
 */
static int
mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size)
{
	swreg tmp_reg;

	/* Materialize the constant offset in a register usable as operand B */
	tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
				  meta->insn.dst_reg * 2, size);
}
1161
1162 static int
1163 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1164 unsigned int size)
1165 {
1166 if (meta->ptr.type == PTR_TO_CTX) {
1167 if (nfp_prog->act == NN_ACT_XDP)
1168 return mem_ldx_xdp(nfp_prog, meta, size);
1169 else
1170 return mem_ldx_skb(nfp_prog, meta, size);
1171 }
1172
1173 if (meta->ptr.type == PTR_TO_PACKET)
1174 return mem_ldx_data(nfp_prog, meta, size);
1175
1176 return -EOPNOTSUPP;
1177 }
1178
/* BPF_LDX | BPF_MEM | BPF_B: 1-byte memory load. */
static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 1);
}
1183
/* BPF_LDX | BPF_MEM | BPF_H: 2-byte memory load. */
static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 2);
}
1188
/* BPF_LDX | BPF_MEM | BPF_W: 4-byte memory load. */
static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 4);
}
1193
/* BPF_LDX | BPF_MEM | BPF_DW: 8-byte memory load. */
static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_ldx(nfp_prog, meta, 8);
}
1198
/* 4-byte context store in TC mode - not implemented. */
static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return -EOPNOTSUPP;
}
1203
/* 4-byte context store in XDP mode - not implemented. */
static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return -EOPNOTSUPP;
}
1208
1209 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1210 {
1211 if (meta->ptr.type == PTR_TO_PACKET)
1212 return -EOPNOTSUPP;
1213
1214 if (nfp_prog->act == NN_ACT_XDP)
1215 return mem_stx4_xdp(nfp_prog, meta);
1216 return mem_stx4_skb(nfp_prog, meta);
1217 }
1218
/* BPF_JMP | BPF_JA: unconditional jump.  Backward jumps are not
 * supported yet; the target is patched in by nfp_fixup_branches().
 */
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	if (meta->insn.off < 0) /* TODO */
		return -EOPNOTSUPP;
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);

	return 0;
}
1227
/* BPF_JMP | BPF_JEQ | BPF_K: branch if the 64-bit dst equals the
 * sign-extended 32-bit immediate.
 */
static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg or1, or2, tmp_reg;

	/* Default operands of the final OR: the raw dst halves, used
	 * when the corresponding immediate half is zero (the half must
	 * then itself be zero for equality).
	 */
	or1 = reg_a(insn->dst_reg * 2);
	or2 = reg_b(insn->dst_reg * 2 + 1);

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	/* XOR each 32-bit half with its immediate half; result is zero
	 * iff that half matches.
	 */
	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_a(nfp_prog),
			 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
		or1 = imm_a(nfp_prog);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
		or2 = imm_b(nfp_prog);
	}

	/* Take the branch when OR of both comparison results is zero. */
	emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}
1259
/* BPF_JMP | BPF_JGT | BPF_K: unsigned dst > imm. */
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
1264
/* BPF_JMP | BPF_JGE | BPF_K: unsigned dst >= imm. */
static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
1269
/* BPF_JMP | BPF_JLT | BPF_K: unsigned dst < imm. */
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}
1274
/* BPF_JMP | BPF_JLE | BPF_K: unsigned dst <= imm. */
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
1279
/* BPF_JMP | BPF_JSET | BPF_K: branch if dst & sign-extended imm has
 * any bit set.
 */
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		/* dst & 0 is always zero - the branch can never be
		 * taken, so drop the instruction entirely.
		 */
		meta->skip = true;
		return 0;
	}

	/* Test each 32-bit half that has immediate bits; either test
	 * firing (AND != 0) satisfies the JSET condition.
	 */
	if (imm & ~0U) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	if (imm >> 32) {
		tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
		emit_alu(nfp_prog, reg_none(),
			 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
	}

	return 0;
}
1310
/* BPF_JMP | BPF_JNE | BPF_K: branch if the 64-bit dst differs from the
 * sign-extended immediate.
 */
static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	swreg tmp_reg;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	if (!imm) {
		/* Compare against zero: branch if either half non-zero. */
		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
		emit_br(nfp_prog, BR_BNE, insn->off, 0);
		return 0;
	}

	/* General case: branch as soon as either half differs from its
	 * immediate half (XOR != 0).  Both halves are always checked -
	 * even when one immediate half is zero dst must match it.
	 */
	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
	emit_br(nfp_prog, BR_BNE, insn->off, 0);

	return 0;
}
1339
/* BPF_JMP | BPF_JEQ | BPF_X: branch if the two 64-bit registers are
 * equal.  XOR the halves pairwise, OR the results and branch on zero.
 */
static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	if (insn->off < 0) /* TODO */
		return -EOPNOTSUPP;

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
		 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
	emit_alu(nfp_prog, reg_none(),
		 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
	emit_br(nfp_prog, BR_BEQ, insn->off, 0);

	return 0;
}
1357
/* BPF_JMP | BPF_JGT | BPF_X: unsigned dst > src. */
static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
1362
/* BPF_JMP | BPF_JGE | BPF_X: unsigned dst >= src. */
static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
1367
/* BPF_JMP | BPF_JLT | BPF_X: unsigned dst < src. */
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}
1372
/* BPF_JMP | BPF_JLE | BPF_X: unsigned dst <= src. */
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
1377
/* BPF_JMP | BPF_JSET | BPF_X: branch if dst & src has any bit set. */
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
}
1382
/* BPF_JMP | BPF_JNE | BPF_X: branch if dst != src (XOR non-zero). */
static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
1387
/* BPF_JMP | BPF_EXIT: unconditional branch to the normal-exit epilogue.
 * Emitted as a "special" branch; the real target (tgt_out) is patched
 * in by nfp_fixup_branches() after nfp_outro() lays down the epilogue.
 */
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);

	return 0;
}
1394
/* Translator dispatch table, indexed by the BPF opcode byte.  Opcodes
 * without an entry are NULL and make nfp_translate() fail with -ENOENT.
 */
static const instr_cb_t instr_cb[256] = {
	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
	[BPF_ALU64 | BPF_XOR | BPF_X] =	xor_reg64,
	[BPF_ALU64 | BPF_XOR | BPF_K] =	xor_imm64,
	[BPF_ALU64 | BPF_AND | BPF_X] =	and_reg64,
	[BPF_ALU64 | BPF_AND | BPF_K] =	and_imm64,
	[BPF_ALU64 | BPF_OR | BPF_X] =	or_reg64,
	[BPF_ALU64 | BPF_OR | BPF_K] =	or_imm64,
	[BPF_ALU64 | BPF_ADD | BPF_X] =	add_reg64,
	[BPF_ALU64 | BPF_ADD | BPF_K] =	add_imm64,
	[BPF_ALU64 | BPF_SUB | BPF_X] =	sub_reg64,
	[BPF_ALU64 | BPF_SUB | BPF_K] =	sub_imm64,
	[BPF_ALU64 | BPF_LSH | BPF_K] =	shl_imm64,
	[BPF_ALU64 | BPF_RSH | BPF_K] =	shr_imm64,
	[BPF_ALU | BPF_MOV | BPF_X] =	mov_reg,
	[BPF_ALU | BPF_MOV | BPF_K] =	mov_imm,
	[BPF_ALU | BPF_XOR | BPF_X] =	xor_reg,
	[BPF_ALU | BPF_XOR | BPF_K] =	xor_imm,
	[BPF_ALU | BPF_AND | BPF_X] =	and_reg,
	[BPF_ALU | BPF_AND | BPF_K] =	and_imm,
	[BPF_ALU | BPF_OR | BPF_X] =	or_reg,
	[BPF_ALU | BPF_OR | BPF_K] =	or_imm,
	[BPF_ALU | BPF_ADD | BPF_X] =	add_reg,
	[BPF_ALU | BPF_ADD | BPF_K] =	add_imm,
	[BPF_ALU | BPF_SUB | BPF_X] =	sub_reg,
	[BPF_ALU | BPF_SUB | BPF_K] =	sub_imm,
	[BPF_ALU | BPF_LSH | BPF_K] =	shl_imm,
	[BPF_ALU | BPF_END | BPF_X] =	end_reg32,
	[BPF_LD | BPF_IMM | BPF_DW] =	imm_ld8,
	[BPF_LD | BPF_ABS | BPF_B] =	data_ld1,
	[BPF_LD | BPF_ABS | BPF_H] =	data_ld2,
	[BPF_LD | BPF_ABS | BPF_W] =	data_ld4,
	[BPF_LD | BPF_IND | BPF_B] =	data_ind_ld1,
	[BPF_LD | BPF_IND | BPF_H] =	data_ind_ld2,
	[BPF_LD | BPF_IND | BPF_W] =	data_ind_ld4,
	[BPF_LDX | BPF_MEM | BPF_B] =	mem_ldx1,
	[BPF_LDX | BPF_MEM | BPF_H] =	mem_ldx2,
	[BPF_LDX | BPF_MEM | BPF_W] =	mem_ldx4,
	[BPF_LDX | BPF_MEM | BPF_DW] =	mem_ldx8,
	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
	[BPF_JMP | BPF_JA | BPF_K] =	jump,
	[BPF_JMP | BPF_JEQ | BPF_K] =	jeq_imm,
	[BPF_JMP | BPF_JGT | BPF_K] =	jgt_imm,
	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
	[BPF_JMP | BPF_JGT | BPF_X] =	jgt_reg,
	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
	[BPF_JMP | BPF_EXIT] =		goto_out,
};
1453
1454 /* --- Misc code --- */
/* Patch the jump target of a branch machine instruction in place.
 * The bits of @offset that fit go into OP_BR_ADDR_LO; OP_BR_ADDR_HI is
 * a single flag set when the offset does not fit entirely in LO.
 */
static void br_set_offset(u64 *instr, u16 offset)
{
	u16 addr_lo, addr_hi;

	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = offset != addr_lo;	/* boolean: high bits present */
	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}
1465
1466 /* --- Assembler logic --- */
1467 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
1468 {
1469 struct nfp_insn_meta *meta, *next;
1470 u32 off, br_idx;
1471 u32 idx;
1472
1473 nfp_for_each_insn_walk2(nfp_prog, meta, next) {
1474 if (meta->skip)
1475 continue;
1476 if (BPF_CLASS(meta->insn.code) != BPF_JMP)
1477 continue;
1478
1479 br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
1480 if (!nfp_is_br(nfp_prog->prog[br_idx])) {
1481 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
1482 br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
1483 return -ELOOP;
1484 }
1485 /* Leave special branches for later */
1486 if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
1487 continue;
1488
1489 /* Find the target offset in assembler realm */
1490 off = meta->insn.off;
1491 if (!off) {
1492 pr_err("Fixup found zero offset!!\n");
1493 return -ELOOP;
1494 }
1495
1496 while (off && nfp_meta_has_next(nfp_prog, next)) {
1497 next = nfp_meta_next(next);
1498 off--;
1499 }
1500 if (off) {
1501 pr_err("Fixup found too large jump!! %d\n", off);
1502 return -ELOOP;
1503 }
1504
1505 if (next->skip) {
1506 pr_err("Branch landing on removed instruction!!\n");
1507 return -ELOOP;
1508 }
1509
1510 for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
1511 idx <= br_idx; idx++) {
1512 if (!nfp_is_br(nfp_prog->prog[idx]))
1513 continue;
1514 br_set_offset(&nfp_prog->prog[idx], next->off);
1515 }
1516 }
1517
1518 /* Fixup 'goto out's separately, they can be scattered around */
1519 for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
1520 enum br_special special;
1521
1522 if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
1523 continue;
1524
1525 special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
1526 switch (special) {
1527 case OP_BR_NORMAL:
1528 break;
1529 case OP_BR_GO_OUT:
1530 br_set_offset(&nfp_prog->prog[br_idx],
1531 nfp_prog->tgt_out);
1532 break;
1533 case OP_BR_GO_ABORT:
1534 br_set_offset(&nfp_prog->prog[br_idx],
1535 nfp_prog->tgt_abort);
1536 break;
1537 }
1538
1539 nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
1540 }
1541
1542 return 0;
1543 }
1544
/* Program prologue: extract the packet-length field (low 14 bits) from
 * the packet-vector length word into plen_reg, where context loads of
 * the length and data_end computations expect it.
 */
static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
1551
/* Epilogue for the legacy TC actions: map the program's return value
 * (R0) to the firmware result code loaded into the flags word.
 */
static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
{
	/* Firmware result codes for the two legacy actions */
	const u8 act2code[] = {
		[NN_ACT_TC_DROP]  = 0x22,
		[NN_ACT_TC_REDIR] = 0x24
	};
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
	wrp_immed(nfp_prog, reg_both(0), 0);	/* R0 = 0, fall into tgt_out */

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
	/* Legacy TC mode:
	 *    0        0x11 -> pass,  count as stat0
	 *   -1  drop  0x22 -> drop,  count as stat1
	 *       redir 0x24 -> redir, count as stat1
	 *  ife mark   0x21 -> pass,  count as stat1
	 *  ife + tx   0x24 -> redir, count as stat1
	 */
	/* Branch to done when R0's low byte != 0xff (i.e. not -1/drop);
	 * the next two insns load the 0x11 "pass" code - presumably in
	 * the branch defer slots, confirm against emit_br_byte_neq().
	 */
	emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Otherwise load the action-specific drop/redirect code. */
	emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
		      SHF_SC_L_SHF, 16);
}
1579
/* Epilogue for TC direct-action mode: translate R0 into a firmware
 * result code via nibble lookup tables held in registers.
 */
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1   ok        NOT SUPPORTED[1]
	 *   2   drop  0x22 -> drop,  count as stat1
	 *   4,5 nuke  0x02 -> drop
	 *   7  redir  0x44 -> redir, count as stat2
	 *   * unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	/* Abort exits with the 0x11 "pass" code. */
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	/* Nibble lookup tables: 4 bits of result code per R0 value,
	 * low and high nibbles of each code kept in separate words.
	 */
	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	/* R0 * 4 = bit offset of the nibble to select */
	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	/* Dummy ALU latches the shift amount; shift count 0 presumably
	 * means "indirect" (see the TODO note in the shift translator).
	 */
	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	/* Recombine the two nibbles and store the code in the flags. */
	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
1630
/* Epilogue for XDP: translate R0 into a firmware result code via a
 * byte lookup table held in a register.
 */
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0 aborted  0x82 -> drop,  count as stat3
	 *   1    drop  0x22 -> drop,  count as stat1
	 *   2    pass  0x11 -> pass,  count as stat0
	 *   3      tx  0x44 -> redir, count as stat2
	 *   * unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	/* Abort exits with the 0x82 "aborted" code. */
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	/* Byte lookup table: one result code per R0 value 0..3
	 * (0x82, 0x22, 0x11, 0x44 from the low byte up - see table above).
	 */
	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	/* R0 * 8 = bit offset of the byte to select */
	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	/* Dummy ALU latches the shift amount; shift count 0 presumably
	 * means "indirect" (see the TODO note in the shift translator).
	 */
	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
1669
1670 static void nfp_outro(struct nfp_prog *nfp_prog)
1671 {
1672 switch (nfp_prog->act) {
1673 case NN_ACT_DIRECT:
1674 nfp_outro_tc_da(nfp_prog);
1675 break;
1676 case NN_ACT_TC_DROP:
1677 case NN_ACT_TC_REDIR:
1678 nfp_outro_tc_legacy(nfp_prog);
1679 break;
1680 case NN_ACT_XDP:
1681 nfp_outro_xdp(nfp_prog);
1682 break;
1683 }
1684 }
1685
/* Translate the whole program: prologue, one callback per BPF insn,
 * epilogue, prefetch-window nop padding, then branch fixups.
 * Returns 0 or a negative errno (-ENOENT for unsupported opcodes).
 */
static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int i, err;

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		/* Record where this BPF insn starts in the NFP stream
		 * so nfp_fixup_branches() can map offsets.
		 */
		meta->off = nfp_prog_current_offset(nfp_prog);

		if (meta->skip) {
			nfp_prog->n_translated++;
			continue;
		}

		/* Second half of a two-part insn (64-bit immediate
		 * load) is handled by the previous insn's double_cb.
		 */
		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;	/* opcode not in instr_cb[] */
		err = cb(nfp_prog, meta);
		if (err)
			return err;

		nfp_prog->n_translated++;
	}

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	/* Pad so the ustore prefetch window never reads past the end. */
	for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++)
		emit_nop(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}
1728
1729 static int
1730 nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
1731 unsigned int cnt)
1732 {
1733 unsigned int i;
1734
1735 for (i = 0; i < cnt; i++) {
1736 struct nfp_insn_meta *meta;
1737
1738 meta = kzalloc(sizeof(*meta), GFP_KERNEL);
1739 if (!meta)
1740 return -ENOMEM;
1741
1742 meta->insn = prog[i];
1743 meta->n = i;
1744
1745 list_add_tail(&meta->l, &nfp_prog->insns);
1746 }
1747
1748 return 0;
1749 }
1750
1751 /* --- Optimizations --- */
/* Skip the register-init boilerplate at the start of the program:
 * "R6 = R1" context copies are marked skipped (the ctx pointer is
 * implicit on the NFP); self-XOR zeroing insns are tolerated and kept.
 * The scan stops at the first instruction matching neither pattern.
 */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->skip = true;

		/* Return as soon as something doesn't match */
		if (!meta->skip)
			return;
	}
}
1774
1775 /* Remove masking after load since our load guarantees this is not needed */
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	/* Full-width mask for each load size; an AND with exactly this
	 * value after a zero-extending load is a no-op.
	 */
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		/* Pattern: LD ABS/IND followed by AND-immediate. */
		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		/* The AND must target R0 (which LD ABS/IND writes). */
		if (next.src_reg || next.dst_reg)
			continue;

		meta2->skip = true;
	}
}
1811
/* Remove the <<32 / >>32 (in either order) shift pair that follows a
 * 4-byte direct/indirect packet load.  The pair zero-extends to 32
 * bits, which our load already guarantees.
 */
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		/* LSH then RSH, or RSH then LSH */
		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		/* Both shifts must operate on R0. */
		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		/* Both shifts must be by exactly 32 bits. */
		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		meta2->skip = true;
		meta3->skip = true;
	}
}
1848
/* Run the pre-translation optimization passes (all mark instructions
 * as skipped rather than removing them).  Always succeeds.
 */
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);

	return 0;
}
1858
1859 static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
1860 {
1861 int i;
1862
1863 for (i = 0; i < nfp_prog->prog_len; i++) {
1864 int err;
1865
1866 err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
1867 if (err)
1868 return err;
1869
1870 nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
1871
1872 ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
1873 }
1874
1875 return 0;
1876 }
1877
1878 /**
1879 * nfp_bpf_jit() - translate BPF code into NFP assembly
1880 * @filter: kernel BPF filter struct
1881 * @prog_mem: memory to store assembler instructions
1882 * @act: action attached to this eBPF program
1883 * @prog_start: offset of the first instruction when loaded
1884 * @prog_done: where to jump on exit
1885 * @prog_sz: size of @prog_mem in instructions
1886 * @res: achieved parameters of translation results
1887 */
1888 int
1889 nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
1890 enum nfp_bpf_action_type act,
1891 unsigned int prog_start, unsigned int prog_done,
1892 unsigned int prog_sz, struct nfp_bpf_result *res)
1893 {
1894 struct nfp_prog *nfp_prog;
1895 int ret;
1896
1897 nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
1898 if (!nfp_prog)
1899 return -ENOMEM;
1900
1901 INIT_LIST_HEAD(&nfp_prog->insns);
1902 nfp_prog->act = act;
1903 nfp_prog->start_off = prog_start;
1904 nfp_prog->tgt_done = prog_done;
1905
1906 ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
1907 if (ret)
1908 goto out;
1909
1910 ret = nfp_prog_verify(nfp_prog, filter);
1911 if (ret)
1912 goto out;
1913
1914 ret = nfp_bpf_optimize(nfp_prog);
1915 if (ret)
1916 goto out;
1917
1918 nfp_prog->num_regs = MAX_BPF_REG;
1919 nfp_prog->regs_per_thread = 32;
1920
1921 nfp_prog->prog = prog_mem;
1922 nfp_prog->__prog_alloc_len = prog_sz;
1923
1924 ret = nfp_translate(nfp_prog);
1925 if (ret) {
1926 pr_err("Translation failed with error %d (translated: %u)\n",
1927 ret, nfp_prog->n_translated);
1928 ret = -EINVAL;
1929 goto out;
1930 }
1931
1932 ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);
1933
1934 res->n_instr = nfp_prog->prog_len;
1935 res->dense_mode = false;
1936 out:
1937 nfp_prog_free(nfp_prog);
1938
1939 return ret;
1940 }