/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"
int bpf_jit_enable __read_mostly;
/*
 * eBPF prog stack layout:
 *
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        | ... | Function call stack
 *                        +-----+
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 * original ARM_SP =>     +------------------+ \
 *                        |        pc        | |
 * current ARM_FP =>      +------------------+ } callee saved registers
 *                        |r4-r8,r10,fp,ip,lr| |
 *                        +------------------+ /
 *
 * Without frame pointers:
 *
 * original ARM_SP =>     +------------------+
 *                        | r4-r8,r10,fp,lr  | callee saved registers
 * current ARM_FP =>      +------------------+
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
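
/*
 * CALLEE_PUSH_MASK is used by the prologue (it saves LR so the JITed
 * program can return to its caller) and CALLEE_POP_MASK by the epilogue,
 * which restores the saved LR directly into PC to perform the return.
 */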
#define STACK_OFFSET(k)	(k)
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)
/*
 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 *
 * 1. First argument is passed using the arm 32bit registers and rest of the
 * arguments are passed on stack scratch space.
 * 2. First callee-saved argument is mapped to arm 32 bit registers and rest
 * arguments are mapped to scratch space on stack.
 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 * registers.
 *
 * As the eBPF registers are all 64 bit registers and arm has only 32 bit
 * registers, we have to map each eBPF register to two arm 32 bit registers
 * or to scratch memory space and build the eBPF 64 bit register from those.
 */
static const u8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
	[BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
	[BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
	[BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
	[BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
	[BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
	/* Read only Frame Pointer to access Stack */
	[BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
	/* Temporary Register for internal BPF JIT, can be used
	 * for constant blindings and others.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R10, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
};
#define	dst_lo	dst[1]
#define dst_hi	dst[0]
#define	src_lo	src[1]
#define src_hi	src[0]
/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of current last JITed instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset of epilogue starting.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	no of bytes used in epilogue.
 * imm_count		:	no of immediate counts used for global
 *				variables.
 * imms			:	array of global variable addresses.
 */
struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};
/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}
static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
/*
 * Checks if immediate value can be converted to imm12(12 bits) value.
 */
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}
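
/*
 * Worked example: imm8m(0xff000000) matches at rot = 4, since
 * ror32(0xff, 8) == 0xff000000, and returns 0x4ff: the low byte is the
 * 8-bit immediate (0xff) and bits 8-11 hold the rotation count (4),
 * mirroring the ARM "imm8 rotated right by 2 * rot" operand encoding.
 * A value such as 0x101 spans more bits than one rotated byte can
 * cover, has no such encoding, and yields a negative return value.
 */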
/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, BPF_REG_FP,
 * BPF_REG_AX and the tail call count: ten 64-bit slots.
 */
#define SCRATCH_SIZE 80

/* total stack size used in JITed code */
#define _STACK_SIZE \
	(ctx->prog->aux->stack_depth + \
	 SCRATCH_SIZE + \
	 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)

/* Get the offset of eBPF REGISTERs stored on scratch space. */
#define STACK_VAR(off) (STACK_SIZE - off - 4)

/* Offset of skb_copy_bits buffer */
#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE)
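
/*
 * Example: STACK_VAR(STACK_OFFSET(0)) == STACK_SIZE - 4, so the low word
 * of BPF_REG_2 occupies the highest word of the JITed frame, and
 * successive scratch offsets move downwards from there.  SKB_BUFFER
 * resolves to the word just below the SCRATCH_SIZE bytes of register
 * spill space.
 */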
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * literal pool is too far, signal it into flags. we
		 * can only detect it on the second pass unfortunately.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}
/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	/* load the value from the literal pool */
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}
static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;

	/* No need for 1st dummy run */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}
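
/*
 * The -2 above accounts for the ARM branch encoding: the offset field of
 * a B instruction is taken relative to PC + 8, i.e. two instructions
 * beyond the branch itself, and epilogue_offset() is measured in 32-bit
 * instructions.
 */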
static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const u8 *tmp = bpf2a32[TMP_REG_1];
	s32 jmp_offset;

	/* checks if divisor is zero or not. If it is, then
	 * exit directly.
	 */
	emit(ARM_CMP_I(rn, 0), ctx);
	_emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx);
	jmp_offset = epilogue_offset(ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions
	 * As ARM_R1 and ARM_R0 contains 1st argument of bpf
	 * function, we need to save it on caller side to save
	 * it from getting destroyed within callee.
	 * After the return from the callee, we restore ARM_R0
	 * ARM_R1.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
/* Checks whether BPF register is on scratch stack space or not. */
static inline bool is_on_stack(u8 bpf_reg)
{
	static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
				  BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
				  BPF_REG_2, BPF_REG_FP};
	int i, reg_len = sizeof(stack_regs);

	for (i = 0 ; i < reg_len ; i++) {
		if (bpf_reg == stack_regs[i])
			return true;
	}
	return false;
}
static inline void emit_a32_mov_i(const u8 dst, const u32 val,
				  bool dstk, struct jit_ctx *ctx)
{
	const u8 *tmp = bpf2a32[TMP_REG_1];

	if (dstk) {
		emit_mov_i(tmp[1], val, ctx);
		emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}
/* Sign extended move */
static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
				    const u32 val, bool dstk,
				    struct jit_ctx *ctx) {
	u32 hi = 0;

	if (is64 && (val & (1<<31)))
		hi = (u32)~0;
	emit_a32_mov_i(dst_lo, val, dstk, ctx);
	emit_a32_mov_i(dst_hi, hi, dstk, ctx);
}
static inline void emit_a32_add_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}
static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}
static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed)*/
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}
/* ALU operation (32 bit)
 * dst = dst (op) src
 */
static inline void emit_a32_alu_r(const u8 dst, const u8 src,
				  bool dstk, bool sstk,
				  struct jit_ctx *ctx, const bool is64,
				  const bool hi, const u8 op) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rn = sstk ? tmp[1] : src;

	if (sstk)
		/* src is on the stack */
		emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);

	/* ALU operation */
	if (dstk) {
		emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
		emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
		emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
	} else {
		emit_alu_r(dst, rn, is64, hi, op, ctx);
	}
}
/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
				    const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx,
				    const u8 op) {
	emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
	if (is64)
		emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
	else
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
}
/* dst = src (4 bytes) */
static inline void emit_a32_mov_r(const u8 dst, const u8 src,
				  bool dstk, bool sstk,
				  struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rt = sstk ? tmp[0] : src;

	if (sstk)
		emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
	if (dstk)
		emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
	else
		emit(ARM_MOV_R(dst, rt), ctx);
}
/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
				    const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
	if (is64) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
	} else {
		/* Zero out high 4 bytes */
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
	}
}
/* Shift operations */
static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
				  struct jit_ctx *ctx, const u8 op) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[0] : dst;

	if (dstk)
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	if (dstk)
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
}
/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const u8 dst[], bool dstk,
				  struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[1] : dst[1];
	u8 rm = dstk ? tmp[0] : dst[0];

	/* Setup Operand */
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd, rd, 0), ctx);
	emit(ARM_RSC_I(rm, rm, 0), ctx);

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}
/* dst = dst << src */
static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);

	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}
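
/*
 * The sequence emitted by emit_a32_lsh_r64() is branch-free: it computes
 *
 *	hi = (hi << n) | (lo >> (32 - n)) | (lo << (n - 32));
 *	lo = lo << n;
 *
 * ARM register-specified shifts take the amount from the bottom byte of
 * the shift register and produce 0 for LSL/LSR amounts of 32 or more, so
 * whichever cross term is out of range for the current n contributes
 * nothing.
 */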
/* dst = dst >> src (signed)*/
static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
				     bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
	_emit(ARM_COND_MI, ARM_B(0), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);

	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}
/* dst = dst >> src */
static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup Operands */
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (sstk)
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSR operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);

	if (dstk) {
		emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_LR), ctx);
		emit(ARM_MOV_R(rm, ARM_IP), ctx);
	}
}
/* dst = dst << val */
static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
				    const u32 val, struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rm, rd), ctx);
		else
			emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd, rd, rd), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}
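
/*
 * For immediate shifts the amount is known at JIT time, so the three
 * cases (val < 32, val == 32, val > 32) are resolved here rather than in
 * the emitted code, and only the short instruction sequence that is
 * actually needed gets emitted.  The same pattern is used by the two
 * immediate-shift helpers that follow.
 */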
/* dst = dst >> val */
static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
				    const u32 val, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do LSR operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd, rm), ctx);
		emit(ARM_MOV_I(rm, 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rm, 0), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}
/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
				     const u32 val, struct jit_ctx *ctx){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	/* Setup operands */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}

	/* Do ARSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd, rm), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
	}

	if (dstk) {
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
}
static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
				    bool sstk, struct jit_ctx *ctx) {
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	/* Setup operands for multiplication */
	u8 rd = dstk ? tmp[1] : dst_lo;
	u8 rm = dstk ? tmp[0] : dst_hi;
	u8 rt = sstk ? tmp2[1] : src_lo;
	u8 rn = sstk ? tmp2[0] : src_hi;

	if (dstk) {
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	}
	if (sstk) {
		emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
		emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
	}

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd, rn), ctx);
	emit(ARM_MUL(ARM_LR, rm, rt), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
	emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);

	if (dstk) {
		emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
	} else {
		emit(ARM_MOV_R(rd, ARM_IP), ctx);
	}
}
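
/*
 * emit_a32_mul_r64() uses the schoolbook decomposition
 *
 *	dst = dst_lo * src_lo
 *	      + ((dst_lo * src_hi + dst_hi * src_lo) << 32)
 *
 * UMULL yields the full 64-bit dst_lo * src_lo product, the two 32-bit
 * MULs supply the cross terms, and the high halves of the cross products
 * fall outside the 64-bit result and are dropped.
 */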
/* *(size *)(dst + off) = src */
static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
			      const s32 off, struct jit_ctx *ctx, const u8 sz){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[1] : dst;

	if (dstk)
		emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
	if (off) {
		emit_a32_mov_i(tmp[0], off, false, ctx);
		emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
		rd = tmp[0];
	}
	switch (sz) {
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src, rd, 0), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src, rd, 0), ctx);
		break;
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src, rd, 0), ctx);
		break;
	}
}
/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
			      const s32 off, struct jit_ctx *ctx, const u8 sz){
	const u8 *tmp = bpf2a32[TMP_REG_1];
	u8 rd = dstk ? tmp[1] : dst;
	u8 rm = src;

	if (off) {
		emit_a32_mov_i(tmp[0], off, false, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd, rm, 0), ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd, rm, 0), ctx);
		break;
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd, rm, 0), ctx);
		break;
	}
	if (dstk)
		emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
}
/* Arithmetic Operation */
static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
			     const u8 rn, struct jit_ctx *ctx, u8 op) {
	switch (op) {
	case BPF_JSET:
		emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
		emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
		emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
		break;
	case BPF_JEQ:
	case BPF_JNE:
	case BPF_JGT:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JLT:
		emit(ARM_CMP_R(rd, rm), ctx);
		_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
		break;
	case BPF_JSLE:
	case BPF_JSGT:
		emit(ARM_CMP_R(rn, rt), ctx);
		emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
		break;
	case BPF_JSLT:
	case BPF_JSGE:
		emit(ARM_CMP_R(rt, rn), ctx);
		emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
		break;
	}
}
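
/*
 * For the signed conditions emit_ar_r() has no single 64-bit compare to
 * lean on, so it computes dst - src (or src - dst) as a CMP on the low
 * words followed by SBCS on the high words; the resulting flags are
 * those of the full 64-bit signed subtraction, which is why the jump
 * switch in build_insn() branches on LT/GE for all four signed
 * conditions.
 */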
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{

	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 *r2 = bpf2a32[BPF_REG_2];
	const u8 *r3 = bpf2a32[BPF_REG_3];
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	const u8 *tcc = bpf2a32[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset) - 2)
	u32 off, lo, hi;

	/* if (index >= array->map.max_entries)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	/* array->map.max_entries */
	emit_a32_mov_i(tmp[1], off, false, ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
	emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
	/* index */
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
	/* index >= array->map.max_entries */
	emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 * tail_call_cnt++;
	 */
	lo = (u32)MAX_TAIL_CALL_CNT;
	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
	emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
	emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
	emit(ARM_CMP_I(tmp[0], hi), ctx);
	_emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
	emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
	emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
	emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
	emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);

	/* prog = array->ptrs[index]
	 * if (prog == NULL)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a32_mov_i(tmp[1], off, false, ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
	emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
	emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
	emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
	emit(ARM_CMP_I(tmp[1], 0), ctx);
	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a32_mov_i(tmp2[1], off, false, ctx);
	emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
	emit_bx_r(tmp[1], ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
/* 0xabcd => 0xcdab */
static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
#else /* ARMv6+ */
	emit(ARM_REV16(rd, rn), ctx);
#endif
}
/* 0xabcdefgh => 0xghefcdab */
static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 6
	const u8 *tmp2 = bpf2a32[TMP_REG_2];

	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);

	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
#else /* ARMv6+ */
	emit(ARM_REV(rd, rn), ctx);
#endif
}
// push the scratch stack register on top of the stack
static inline void emit_push_r64(const u8 src[], const u8 shift,
				 struct jit_ctx *ctx)
{
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	u16 reg_set = 0;

	emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
	emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);

	reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
	emit(ARM_PUSH(reg_set), ctx);
}
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a32[BPF_REG_0][1];
	const u8 r2 = bpf2a32[BPF_REG_1][1];
	const u8 r3 = bpf2a32[BPF_REG_1][0];
	const u8 r4 = bpf2a32[BPF_REG_6][1];
	const u8 fplo = bpf2a32[BPF_REG_FP][1];
	const u8 fphi = bpf2a32[BPF_REG_FP][0];
	const u8 *tcc = bpf2a32[TCALL_CNT];

	/* Save callee saved registers. */
#ifdef CONFIG_FRAME_POINTER
	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
	/* Save frame pointer for later */
	emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);

	ctx->stack_size = imm8m(STACK_SIZE);

	/* Set up function call stack */
	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);

	/* Set up BPF prog stack base register */
	emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
	emit_a32_mov_i(fphi, 0, true, ctx);

	/* mov r4, 0 */
	emit(ARM_MOV_I(r4, 0), ctx);

	/* Move BPF_CTX to BPF_R1 */
	emit(ARM_MOV_R(r3, r4), ctx);
	emit(ARM_MOV_R(r2, r0), ctx);
	/* Initialize Tail Count */
	emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
	emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
	/* end of prologue */
}
/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
#ifdef CONFIG_FRAME_POINTER
	/* When using frame pointers, some additional registers need to
	 * be loaded.
	 */
	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	/* Restore callee saved registers. */
	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
	emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}
/*
 * Convert an eBPF instruction to native instruction, i.e
 * JITs an eBPF instruction.
 * Returns :
 *	0  - Successfully JITed an 8-byte eBPF instruction
 *	>0 - Successfully JITed a 16-byte eBPF instruction
 *	<0 - Failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 *dst = bpf2a32[insn->dst_reg];
	const u8 *src = bpf2a32[insn->src_reg];
	const u8 *tmp = bpf2a32[TMP_REG_1];
	const u8 *tmp2 = bpf2a32[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	const bool dstk = is_on_stack(insn->dst_reg);
	const bool sstk = is_on_stack(insn->src_reg);
	u8 rd, rt, rm, rn;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm24(imm) check_imm(24, imm)
	switch (code) {
	/* ALU operations */

	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
			break;
		case BPF_K:
			/* Sign-extend immediate value to destination reg */
			emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
			break;
		}
		break;
	/* dst = dst + src/imm */
	/* dst = dst - src/imm */
	/* dst = dst | src/imm */
	/* dst = dst & src/imm */
	/* dst = dst ^ src/imm */
	/* dst = dst * src/imm */
	/* dst = dst << src */
	/* dst = dst >> src */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_alu_r64(is64, dst, src, dstk, sstk,
					 ctx, BPF_OP(code));
			break;
		case BPF_K:
			/* Move immediate value to the temporary register
			 * and then do the ALU operation on the temporary
			 * register as this will sign-extend the immediate
			 * value into temporary reg and then it would be
			 * safe to do the operation on it.
			 */
			emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
			emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
					 ctx, BPF_OP(code));
			break;
		}
		break;
	/* dst = dst / src(imm) */
	/* dst = dst % src(imm) */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_X:
		rt = src_lo;
		rd = dstk ? tmp2[1] : dst_lo;
		if (dstk)
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		switch (BPF_SRC(code)) {
		case BPF_X:
			rt = sstk ? tmp2[0] : rt;
			if (sstk)
				emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
				     ctx);
			break;
		case BPF_K:
			rt = tmp2[0];
			emit_a32_mov_i(rt, imm, false, ctx);
			break;
		}
		emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
		if (dstk)
			emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		goto notyet;
	/* dst = dst >> imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (unlikely(imm > 31))
			return -EINVAL;
		if (imm)
			emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	/* dst = dst << imm */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsh_i64(dst, dstk, imm, ctx);
		break;
	/* dst = dst >> imm */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_lsr_i64(dst, dstk, imm, ctx);
		break;
	/* dst = dst << src */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> src */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
		break;
	/* dst = dst >> imm (signed) */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		if (unlikely(imm > 63))
			return -EINVAL;
		emit_a32_arsh_i64(dst, dstk, imm, ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
		emit_a32_mov_i(dst_hi, 0, dstk, ctx);
		break;
	/* dst = -dst (64 bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_a32_neg64(dst, dstk, ctx);
		break;
	/* dst = dst * src/imm */
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		switch (BPF_SRC(code)) {
		case BPF_X:
			emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
			break;
		case BPF_K:
			/* Move immediate value to the temporary register
			 * and then do the multiplication on it as this
			 * will sign-extend the immediate value into temp
			 * reg then it would be safe to do the operation
			 * on it.
			 */
			emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
			emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
			break;
		}
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		rd = dstk ? tmp[0] : dst_hi;
		rt = dstk ? tmp[1] : dst_lo;
		if (dstk) {
			emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
		switch (imm) {
		case 16:
			emit_rev16(rt, rt, ctx);
			goto emit_bswap_uxt;
		case 32:
			emit_rev32(rt, rt, ctx);
			goto emit_bswap_uxt;
		case 64:
			emit_rev32(ARM_LR, rt, ctx);
			emit_rev32(rt, rd, ctx);
			emit(ARM_MOV_R(rd, ARM_LR), ctx);
			break;
		}
		goto exit;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
#if __LINUX_ARM_ARCH__ < 6
			emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
			emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
#else /* ARMv6+ */
			emit(ARM_UXTH(rt, rt), ctx);
#endif
			emit(ARM_EOR_R(rd, rd, rd), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(ARM_EOR_R(rd, rd, rd), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
exit:
		if (dstk) {
			emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u32 hi, lo = imm;

		hi = insn1.imm;
		emit_a32_mov_i(dst_lo, lo, dstk, ctx);
		emit_a32_mov_i(dst_hi, hi, dstk, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		rn = sstk ? tmp2[1] : src_lo;
		if (sstk)
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			/* Load a Word */
		case BPF_H:
			/* Load a Half-Word */
		case BPF_B:
			/* Load a Byte */
			emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
			emit_a32_mov_i(dst_hi, 0, dstk, ctx);
			break;
		case BPF_DW:
			/* Load a double word */
			emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
			emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
			break;
		}
		break;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */
		const u8 r0 = bpf2a32[BPF_REG_0][1]; /*r0: struct sk_buff *skb*/
						     /* rtn value */
		const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */
		const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */
		const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */
		const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) */
		int size;

		/* Setting up first argument */
		emit(ARM_MOV_R(r0, r4), ctx);

		/* Setting up second argument */
		emit_a32_mov_i(r1, imm, false, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit_a32_alu_r(r1, src_lo, false, sstk, ctx,
				       false, false, BPF_ADD);

		/* Setting up third argument */
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a32_mov_i(r2, size, false, ctx);

		/* Setting up fourth argument */
		emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx);

		/* Setting up function pointer to call */
		emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx);
		emit_blx_r(r6, ctx);

		emit(ARM_EOR_R(r1, r1, r1), ctx);
		/* Check if return address is NULL or not.
		 * if NULL then jump to epilogue
		 * else continue to load the value from retn address
		 */
		emit(ARM_CMP_I(r0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);

		/* Load value from the address */
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(ARM_LDR_I(r0, r0, 0), ctx);
			emit_rev32(r0, r0, ctx);
			break;
		case BPF_H:
			emit(ARM_LDRH_I(r0, r0, 0), ctx);
			emit_rev16(r0, r0, ctx);
			break;
		case BPF_B:
			emit(ARM_LDRB_I(r0, r0, 0), ctx);
			/* No need to reverse */
			break;
		}
		break;
	}
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_DW:
			/* Sign-extend immediate value into temp reg */
			emit_a32_mov_i64(true, tmp2, imm, false, ctx);
			emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
			emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
			break;
		case BPF_W:
		case BPF_H:
		case BPF_B:
			emit_a32_mov_i(tmp2[1], imm, false, ctx);
			emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
				   BPF_SIZE(code));
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
	{
		u8 sz = BPF_SIZE(code);

		rn = sstk ? tmp2[1] : src_lo;
		rm = sstk ? tmp2[0] : src_hi;
		if (sstk) {
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
			emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
		}

		/* Store the value */
		if (BPF_SIZE(code) == BPF_DW) {
			emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
			emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
		} else {
			emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
		}
		break;
	}
	/* PC += off if dst == src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst != src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	/* PC += off if dst & src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
		/* Setup source registers */
		rm = sstk ? tmp2[0] : src_hi;
		rn = sstk ? tmp2[1] : src_lo;
		if (sstk) {
			emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
			emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
		}
		goto go_jmp;
	/* PC += off if dst == imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst != imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
		rm = tmp2[0];
		rn = tmp2[1];
		/* Sign-extend immediate value */
		emit_a32_mov_i64(true, tmp2, imm, false, ctx);
go_jmp:
		/* Setup destination register */
		rd = dstk ? tmp[0] : dst_hi;
		rt = dstk ? tmp[1] : dst_lo;
		if (dstk) {
			emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
			emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
		}

		/* Check for the condition */
		emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));

		/* Setup JUMP instruction */
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		switch (BPF_OP(code)) {
		case BPF_JNE:
		case BPF_JSET:
			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JEQ:
			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGT:
			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JGE:
			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSGE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLE:
			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JLT:
			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLT:
			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
			break;
		case BPF_JSLE:
			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
			break;
		}
		break;
	/* JMP OFF */
	case BPF_JMP | BPF_JA:
	{
		if (off == 0)
			break;
		jmp_offset = bpf2a32_offset(i+off, i, ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 *r0 = bpf2a32[BPF_REG_0];
		const u8 *r1 = bpf2a32[BPF_REG_1];
		const u8 *r2 = bpf2a32[BPF_REG_2];
		const u8 *r3 = bpf2a32[BPF_REG_3];
		const u8 *r4 = bpf2a32[BPF_REG_4];
		const u8 *r5 = bpf2a32[BPF_REG_5];
		const u32 func = (u32)__bpf_call_base + (u32)imm;

		emit_a32_mov_r64(true, r0, r1, false, false, ctx);
		emit_a32_mov_r64(true, r1, r2, false, true, ctx);
		emit_push_r64(r5, 0, ctx);
		emit_push_r64(r4, 8, ctx);
		emit_push_r64(r3, 16, ctx);

		emit_a32_mov_i(tmp[1], func, false, ctx);
		emit_blx_r(tmp[1], ctx);

		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
		break;
	}
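	/*
	 * The call sequence above follows the AAPCS: BPF R1 is passed in
	 * r0/r1 and BPF R2 in r2/r3, while R3-R5 are pushed onto the stack
	 * (3 x 8 == 24 bytes), which the ADD of 24 to ARM_SP pops back off
	 * once the helper returns.
	 */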
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT
		 * simply fallthrough to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm24(jmp_offset);
		emit(ARM_B(jmp_offset), ctx);
		break;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	if (ctx->flags & FLAG_IMM_OVERFLOW)
		/*
		 * this instruction generated an overflow when
		 * trying to access the literal pool, so
		 * delegate this filter to the kernel interpreter.
		 */
		return -1;

	return 0;
}
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	unsigned int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &(prog->insnsi[i]);
		int ret;

		ret = build_insn(insn, ctx);

		/* It's used with loading the 64 bit immediate value. */
		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				ctx->offsets[i] = ctx->idx;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx;

		/* If unsuccessful, return with error code */
		if (ret)
			return ret;
	}
	return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
			return -1;
	}

	return 0;
}
void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;

	/* If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!bpf_jit_enable)
		return orig_prog;

	/* If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);

	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	/* If we are not able to allocate memory for offsets[], then
	 * we must fall back to the interpreter.
	 */
	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offsets == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1) fake pass to find the length of the JITed code,
	 * to compute ctx->offsets and other context variables
	 * needed to compute final JITed code.
	 * Also, calculate random starting pointer/start of JITed code
	 * which is prefixed by random number of fault instructions.
	 *
	 * If the first pass fails then there is no chance of it
	 * being successful in the second pass, so just fall back
	 * to the interpreter.
	 */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.epilogue_offset = ctx.idx;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
		if (ctx.imms == NULL) {
			prog = orig_prog;
			goto out_off;
		}
	}
#else
	/* there's nothing about the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	/* Now we can get the actual image size of the JITed arm code.
	 * Currently, we are not considering the THUMB-2 instructions
	 * for jit, although it can decrease the size of the image.
	 *
	 * As each arm instruction is of length 32bit, we are translating
	 * number of JITed instructions into the size required to store
	 * these JITed code.
	 */
	image_size = sizeof(u32) * ctx.idx;

	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/* If we are not able to allocate memory for the structure then
	 * we must fall back to the interpretation.
	 */
	if (header == NULL) {
		prog = orig_prog;
		goto out_imms;
	}

	/* 2.) Actual pass to generate final JIT code */
	ctx.target = (u32 *) image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	/* If building the body of the JITed code fails somehow,
	 * we fall back to the interpretation.
	 */
	if (build_body(&ctx) < 0) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	build_epilogue(&ctx);

	/* 3.) Extra pass to validate JITed Code */
	if (validate_code(&ctx)) {
		image_ptr = NULL;
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_imms;
	}
	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_imms:
#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif
out_off:
	kfree(ctx.offsets);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}