/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "bpf_jit: " fmt
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};
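/*
 * Note on the mapping above: BPF argument registers R1-R5 land directly
 * in the AAPCS64 argument registers x0-x4, so a helper call needs no
 * argument shuffling, and R6-R9 live in callee-saved registers
 * (x19-x22) so their values survive such calls.
 */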
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	u32 *image;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
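/*
 * emit() serves both the sizing pass and the code generation pass:
 * while ctx->image is NULL nothing is written, but ctx->idx still
 * advances, which is how image size and branch offsets get computed.
 */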
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
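/*
 * Illustration (a sketch, not emitted verbatim): for val = 0x12345678
 * this produces "movz reg, #0x5678" followed by "movk reg, #0x1234,
 * lsl #16". Zero 16-bit chunks after the leading MOVZ are skipped,
 * since MOVZ already cleared the rest of the register.
 */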
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
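/*
 * Illustration: emit_a64_mov_i(1, reg, -1) takes the MOVN path with
 * (u16)~lo == 0, i.e. a single "movn reg, #0", which writes ~0 = -1;
 * a small positive value such as 5 becomes a single "movz reg, #5".
 */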
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
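/*
 * ctx->offset[i] records the A64 index just past the code emitted for
 * BPF insn i, i.e. the start of insn i + 1. A BPF branch with offset
 * "off" at insn i targets insn i + off + 1 == ctx->offset[i + off],
 * and the A64 branch is the last instruction emitted for insn i,
 * hence the -1 above.
 */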
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)

#define PROLOGUE_OFFSET 8
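/*
 * Worked example, assuming MAX_BPF_STACK is 512: _STACK_SIZE is 516
 * and STACK_ALIGN rounds it up to the next multiple of 16, giving
 * STACK_SIZE == 528, so A64_SP keeps the 16-byte alignment that
 * AAPCS64 requires.
 */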
static int build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 */

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Initialize tail_call_cnt */
	emit(A64_MOVZ(1, tcc, 0, 0), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	cur_offset = ctx->idx - idx0;
	if (cur_offset != PROLOGUE_OFFSET) {
		pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
			    cur_offset, PROLOGUE_OFFSET);
		return -1;
	}

	return 0;
}
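/*
 * The prologue must stay exactly PROLOGUE_OFFSET instructions long:
 * emit_bpf_tail_call() branches into a target program at bpf_func +
 * PROLOGUE_OFFSET words, skipping the target's prologue and reusing
 * the current stack frame. The runtime check above catches any drift.
 */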
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
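/*
 * jmp_offset above depends on out_offset, which is only known once the
 * whole sequence has been emitted. The first (sizing) pass emits with
 * a bogus offset and records the real value at "out:"; because the
 * sequence has a fixed length, the second pass then emits correct
 * branches, and the cur_offset check guards that assumption.
 */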
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
	/* dst = dst << src */
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	/* dst = dst >> src, signed */
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	/* dst = dst >> imm, signed */
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
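	/*
	 * Note: for BPF_CALL, insn->imm is the helper's offset from
	 * __bpf_call_base, so the absolute address is materialized with
	 * a MOVZ/MOVK sequence and called through BLR; the result then
	 * moves from AAPCS64 x0 into BPF R0 (x7).
	 */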
	/* tail call */
	case BPF_JMP | BPF_CALL | BPF_X:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		 * simply fall through to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		emit(A64_ADD(1, tmp, tmp, dst), ctx);
		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
		emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
		jmp_offset = -3;
		check_imm19(jmp_offset);
		emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
		break;
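	/*
	 * The XADD sequence above is a load-exclusive/store-exclusive
	 * loop: STXR writes a status word into tmp2 (0 on success), and
	 * the CBNZ with jmp_offset = -3 branches back to the LDXR to
	 * retry when the exclusive store fails.
	 */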
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
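/*
 * Compilation runs in three passes: a sizing pass with ctx.image == NULL
 * that fills ctx.offset[] and computes the image size, a second pass
 * that writes instructions into the allocated image, and a validation
 * pass that rejects the image if any AARCH64_BREAK_FAULT filler from
 * jit_fill_hole() survived, i.e. a hole never overwritten by code.
 */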
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}