/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"

/*
 * Stack layout:
 *
 *              [   prev sp             ] <-------------
 *              [   nv gpr save area    ] 8*8           |
 * fp (r31) --> [   ebpf stack space    ] 512           |
 *              [   local/tmp var space ] 16            |
 *              [   frame header        ] 32/112        |
 * sp (r1) ---> [   stack pointer       ] --------------
 */

/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	16
/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
#define BPF_PPC_STACK_SAVE	(8*8)
/* Ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
				 MAX_BPF_STACK + BPF_PPC_STACK_SAVE)
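
/*
 * Worked example (illustrative, not from the original sources): assuming the
 * ELFv2 ABI value STACK_FRAME_MIN_SIZE = 32 and MAX_BPF_STACK = 512, the
 * frame comes to 32 + 16 + 512 + 64 = 624 bytes; with the ELFv1 ABI's
 * 112-byte frame header it is 112 + 16 + 512 + 64 = 704 bytes. Both totals
 * are multiples of 16, satisfying the quadword-alignment requirement noted
 * above, e.g.:
 *
 *	BUILD_BUG_ON(BPF_PPC_STACKFRAME % 16);	// hypothetical sanity check
 */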

#ifndef __ASSEMBLY__

/* BPF register usage */
#define SKB_HLEN_REG	(MAX_BPF_REG + 0)
#define SKB_DATA_REG	(MAX_BPF_REG + 1)
#define TMP_REG_1	(MAX_BPF_REG + 2)
#define TMP_REG_2	(MAX_BPF_REG + 3)

/* BPF to ppc register mappings */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[SKB_HLEN_REG] = 25,
	[SKB_DATA_REG] = 26,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};
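
/*
 * Usage sketch (illustrative only): the JIT body indexes this table with the
 * eBPF register number to get the PPC GPR operand for an emitter macro from
 * bpf_jit.h, so a BPF_REG_0 = BPF_REG_1 register move would be emitted
 * roughly as:
 *
 *	PPC_MR(b2p[BPF_REG_0], b2p[BPF_REG_1]);	// mr r8, r3
 *
 * (Treat the exact emitter call as an assumption made for this example.)
 */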

/* Assembly helpers */
#define DECLARE_LOAD_FUNC(func)	u64 func(u64 r3, u64 r4);			\
				u64 func##_negative_offset(u64 r3, u64 r4);	\
				u64 func##_positive_offset(u64 r3, u64 r4);

DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);

#define CHOOSE_LOAD_FUNC(imm, func)						\
			(imm < 0 ?						\
			(imm >= SKF_LL_OFF ? func##_negative_offset : func) :	\
			func##_positive_offset)
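
/*
 * Selection sketch (derived from the macro above): for a BPF_LD | BPF_ABS
 * word load, CHOOSE_LOAD_FUNC(imm, sk_load_word) resolves to
 * sk_load_word_positive_offset when imm >= 0, to sk_load_word_negative_offset
 * when SKF_LL_OFF <= imm < 0, and falls back to the generic sk_load_word for
 * anything more negative than SKF_LL_OFF.
 */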

#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_SKB	0x4000 /* uses sk_buff */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r25-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 16 to 23
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
};
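
/*
 * Usage sketch (illustrative only): the JIT sets bits in 'seen' while
 * scanning the program, then emits a prologue/epilogue that only saves what
 * was actually used, roughly:
 *
 *	ctx->seen |= SEEN_FUNC;				// program calls a helper
 *	ctx->seen |= 1 << (31 - b2p[BPF_REG_6]);	// r27 touched (assumed bit scheme)
 *	if (ctx->seen & SEEN_STACK)
 *		setup_bpf_stack_frame(ctx);		// hypothetical helper name
 *
 * 'idx' is the index of the next instruction to be emitted into the image.
 */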

#endif /* !__ASSEMBLY__ */

#endif /* _BPF_JIT64_H */