/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

int bpf_jit_enable __read_mostly;

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}
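
/*
 * Note: the allocator fills the whole JIT image with trap instructions via
 * the helper above, so any stray branch into space that was allocated but
 * never overwritten with real opcodes traps instead of executing garbage.
 */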

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
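
/*
 * Note on the two helpers above: ctx->seen keeps one bit per mapped GPR
 * (alongside the SEEN_* flags), with GPR n tracked in bit (31 - n). This is
 * what lets the prologue and epilogue save and restore only the
 * non-volatile registers the program actually touches.
 */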

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 8*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}
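
/*
 * Worked example for the two helpers above, using the sizes from the
 * diagram (8*8 = 64 bytes of non-volatile GPR save area): without our own
 * stack frame the local temp slot sits in the red zone at r1 - 80 and
 * tail_call_cnt at r1 - 72; with a frame both move above the BPF stack
 * area, at STACK_FRAME_MIN_SIZE + MAX_BPF_STACK and 8 bytes above that
 * respectively.
 */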

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
							- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers\n");
	BUG();
}
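
/*
 * For example, with a stack frame the highest non-volatile GPR, r31, is
 * saved at BPF_PPC_STACKFRAME - 8, r30 at BPF_PPC_STACKFRAME - 16, and so
 * on; without a frame the same slots sit at -8, -16, ... below r1, inside
 * the red zone pictured above.
 */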

static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
	/*
	 * Load skb->len and skb->data_len
	 * r3 points to skb
	 */
	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
	/* header_len = len - data_len */
	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

	/* skb->data pointer */
	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
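
/*
 * The value above is the size, in bytes, of the two instructions emitted at
 * the top of the prologue (either the li + std pair that zeroes
 * tail_call_cnt, or the two nops). Tail calls jump past them so that the
 * callee keeps the caller's tail_call_cnt instead of resetting it.
 */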

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/*
	 * Save additional non-volatile regs if we cache skb
	 * Also, setup skb data
	 */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
		bpf_jit_emit_skb_loads(image, ctx);
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Restore non-volatile registers used for skb cache */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
	}

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to LR */
	PPC_MTLR(b2p[TMP_REG_1]);
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	PPC_MTLR(12);
#endif
	PPC_BLRL();
}
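
/*
 * For reference: an ELFv1 function descriptor is a triple of doublewords --
 * entry point, TOC pointer, environment pointer -- which is why the entry
 * is loaded from offset 0 and the TOC from offset 8 above.
 */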

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}
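
/*
 * The sequence above open-codes bpf_tail_call(): bounds-check the index,
 * refuse to chain more than MAX_TAIL_CALL_CNT programs, bail out on a NULL
 * slot, then tear down our frame and branch into the target program just
 * past the BPF_TAILCALL_PROLOGUE_SIZE bytes that would re-initialize
 * tail_call_cnt -- so the count incremented above survives into the callee.
 */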

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			PPC_CMPWI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			PPC_CMPDI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
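		/*
		 * Note on the divide-by-zero guard in the two cases above:
		 * the PPC_BCC_SHORT target of (ctx->idx * 4) + 12 lies three
		 * instructions past the branch itself, so a non-zero divisor
		 * skips the "li BPF_REG_0, 0" and the jump to the epilogue
		 * and falls through to the real divide, while a zero divisor
		 * makes the program return 0.
		 */
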
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
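				/*
				 * Worked example: for dst = 0x....ABCD the
				 * rlwinm leaves 0x0000CD00 in the temp reg
				 * and the rlwimi inserts 0xAB into the low
				 * byte, giving the byte-swapped,
				 * zero-extended halfword 0x0000CDAB.
				 */
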
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
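				/*
				 * Worked example: dst = 0xAABBCCDD rotates
				 * to 0xBBCCDDAA; the two rlwimi's then patch
				 * 0xDD into the top byte and 0xBB into the
				 * 0x0000ff00 byte, giving 0xDDCCBBAA.
				 */
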
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;
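			/*
			 * Note: rldicl with a shift of 0 and a mask-begin of
			 * 48 or 32 is the canonical clrldi -- it just clears
			 * the upper 48/32 bits, zero-extending a value that
			 * is already in the right byte order.
			 */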

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
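		/*
		 * Note on the two XADD cases above: each emits a
		 * lwarx/stwcx. (resp. ldarx/stdcx.) sequence, retries the
		 * update exactly once if the conditional store fails, and
		 * otherwise gives up and branches to the epilogue with a
		 * return value of 0 -- the same path taken when the
		 * effective address is misaligned.
		 */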

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
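		/*
		 * For example, loading the 64-bit constant
		 * 0x1122334455667788 is encoded with insn[i].imm =
		 * 0x55667788 and insn[i + 1].imm = 0x11223344; the second
		 * slot still gets an addrs[] entry so that branch offsets
		 * keep lining up.
		 */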

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			/* Save skb pointer if we need to re-cache skb data */
			if (bpf_helper_changes_pkt_data(func))
				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);

			/* refresh skb cache */
			if (bpf_helper_changes_pkt_data(func)) {
				/* reload skb pointer to r3 */
				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
				bpf_jit_emit_skb_loads(image, ctx);
			}
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Loads from packet header/data
		 * Assume 32-bit input value in imm and X (src_reg)
		 */

		/* Absolute loads */
		case BPF_LD | BPF_W | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
			goto common_load_abs;
		case BPF_LD | BPF_H | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
			goto common_load_abs;
		case BPF_LD | BPF_B | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
			/*
			 * Load from [imm]
			 * Load into r4, which can just be passed onto
			 * skb load helpers as the second parameter
			 */
			PPC_LI32(4, imm);
			goto common_load;

		/* Indirect loads */
		case BPF_LD | BPF_W | BPF_IND:
			func = (u8 *)sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = (u8 *)sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = (u8 *)sk_load_byte;
common_load_ind:
			/*
			 * Load from [src_reg + imm]
			 * Treat src_reg as a 32-bit value
			 */
			PPC_EXTSW(4, src_reg);
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(4, 4, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(4, 4, b2p[TMP_REG_1]);
				}
			}

common_load:
			ctx->seen |= SEEN_SKB;
			ctx->seen |= SEEN_FUNC;
			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in BPF_REG_0
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;

	if (!bpf_jit_enable)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	flen = fp->len;
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out;
	}

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out;
	}

	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
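
	/*
	 * Two full code generation passes are needed because addrs[] -- and
	 * with it every branch target -- is only guaranteed to be final once
	 * a complete pass has laid out the body; the second pass re-emits
	 * the same instructions with the correct offsets.
	 */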

	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump from code_base rather than image,
		 * since the opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
	kfree(addrs);

	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}