/* bpf_jit.S : BPF JIT helper functions
 *
 * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>

/*
 * Calling convention :
 * rdi : skb pointer
 * esi : offset of byte(s) to fetch in skb (can be scratched)
 * r8  : copy of skb->data
 * r9d : hlen = skb->len - skb->data_len
 */
#define SKBDATA	%r8
#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */

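/*
 * Fast path overview: each sk_load_* helper first checks that the
 * requested bytes lie entirely inside the skb linear area
 * (offset >= 0 and offset + size <= hlen). If so, it loads directly
 * from SKBDATA + offset and byte-swaps the result to host order;
 * otherwise it falls through to the bpf_slow_path_* (positive offset)
 * or bpf_slow_path_*_neg (ancillary/negative offset) code below.
 */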
sk_load_word:
	.globl	sk_load_word

	test	%esi,%esi
	js	bpf_slow_path_word_neg

sk_load_word_positive_offset:
	.globl	sk_load_word_positive_offset

	mov	%r9d,%eax		# hlen
	sub	%esi,%eax		# hlen - offset
	cmp	$3,%eax
	jle	bpf_slow_path_word
	mov	(SKBDATA,%rsi),%eax
	bswap	%eax			/* ntohl() */
	ret

sk_load_half:
	.globl	sk_load_half

	test	%esi,%esi
	js	bpf_slow_path_half_neg

sk_load_half_positive_offset:
	.globl	sk_load_half_positive_offset

	mov	%r9d,%eax
	sub	%esi,%eax		# hlen - offset
	cmp	$1,%eax
	jle	bpf_slow_path_half
	movzwl	(SKBDATA,%rsi),%eax
	rol	$8,%ax			# ntohs()
	ret

sk_load_byte:
	.globl	sk_load_byte

	test	%esi,%esi
	js	bpf_slow_path_byte_neg

sk_load_byte_positive_offset:
	.globl	sk_load_byte_positive_offset

	cmp	%esi,%r9d	/* if (offset >= hlen) goto bpf_slow_path_byte */
	jle	bpf_slow_path_byte
	movzbl	(SKBDATA,%rsi),%eax
	ret

/**
 * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
 *
 * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
 * Must preserve A accumulator (%eax)
 * Inputs : %esi is the offset value
 */
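/*
 * In C terms (sketch): X = 4 * (pkt[offset] & 0xf), the usual way a
 * filter extracts the IP header length. The result is returned in
 * %ebx, the register this JIT keeps X in, while A stays in %eax
 * (the slow path saves/restores it with xchg).
 */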
sk_load_byte_msh:
	.globl	sk_load_byte_msh
	test	%esi,%esi
	js	bpf_slow_path_byte_msh_neg

sk_load_byte_msh_positive_offset:
	.globl	sk_load_byte_msh_positive_offset
	cmp	%esi,%r9d	/* if (offset >= hlen) goto bpf_slow_path_byte_msh */
	jle	bpf_slow_path_byte_msh
	movzbl	(SKBDATA,%rsi),%ebx
	and	$15,%bl
	shl	$2,%bl
	ret

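/*
 * Slow path for positive offsets: the requested bytes are not all in
 * the linear area, so copy them onto the stack with
 * skb_copy_bits(skb, offset, to, len) and reload from there. The
 * -12(%rbp) scratch slot lives in the stack frame assumed to be set up
 * by the JIT-generated prologue (see bpf_jit_comp.c). A negative
 * return from skb_copy_bits() means the offset is out of range, so we
 * jump to bpf_error.
 */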
/* rsi contains offset and can be scratched */
#define bpf_slow_path_common(LEN)		\
	push	%rdi;    /* save skb */		\
	push	%r9;				\
	push	SKBDATA;			\
	/* rsi already has offset */		\
	mov	$LEN,%ecx;	/* len */	\
	lea	-12(%rbp),%rdx;			\
	call	skb_copy_bits;			\
	test	%eax,%eax;			\
	pop	SKBDATA;			\
	pop	%r9;				\
	pop	%rdi


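/*
 * Each bpf_slow_path_<size> below expands the macro, bails out to
 * bpf_error if skb_copy_bits() failed, then reloads the copied bytes
 * from -12(%rbp) and converts them to host order (bswap for 32 bits,
 * rol $8 for 16 bits).
 */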
bpf_slow_path_word:
	bpf_slow_path_common(4)
	js	bpf_error
	mov	-12(%rbp),%eax
	bswap	%eax
	ret

bpf_slow_path_half:
	bpf_slow_path_common(2)
	js	bpf_error
	mov	-12(%rbp),%ax
	rol	$8,%ax
	movzwl	%ax,%eax
	ret

bpf_slow_path_byte:
	bpf_slow_path_common(1)
	js	bpf_error
	movzbl	-12(%rbp),%eax
	ret

bpf_slow_path_byte_msh:
	xchg	%eax,%ebx /* don't lose A, X is about to be scratched */
	bpf_slow_path_common(1)
	js	bpf_error
	movzbl	-12(%rbp),%eax
	and	$15,%al
	shl	$2,%al
	xchg	%eax,%ebx
	ret

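/*
 * Negative offsets select the ancillary SKF_NET_OFF / SKF_LL_OFF
 * ranges. After checking that the offset is not below SKF_MAX_NEG_OFF,
 * the work is handed to bpf_internal_load_pointer_neg_helper(skb,
 * offset, size) (net/core/filter.c), which returns a pointer to the
 * requested bytes or NULL on failure.
 */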
#define sk_negative_common(SIZE)				\
	push	%rdi;	/* save skb */				\
	push	%r9;						\
	push	SKBDATA;					\
	/* rsi already has offset */				\
	mov	$SIZE,%ecx;	/* size */			\
	call	bpf_internal_load_pointer_neg_helper;		\
	test	%rax,%rax;					\
	pop	SKBDATA;					\
	pop	%r9;						\
	pop	%rdi;						\
	jz	bpf_error


bpf_slow_path_word_neg:
	cmp	SKF_MAX_NEG_OFF, %esi	/* test range */
	jl	bpf_error	/* offset lower -> error */
sk_load_word_negative_offset:
	.globl	sk_load_word_negative_offset
	sk_negative_common(4)
	mov	(%rax), %eax
	bswap	%eax
	ret

bpf_slow_path_half_neg:
	cmp	SKF_MAX_NEG_OFF, %esi
	jl	bpf_error
sk_load_half_negative_offset:
	.globl	sk_load_half_negative_offset
	sk_negative_common(2)
	mov	(%rax),%ax
	rol	$8,%ax
	movzwl	%ax,%eax
	ret

bpf_slow_path_byte_neg:
	cmp	SKF_MAX_NEG_OFF, %esi
	jl	bpf_error
sk_load_byte_negative_offset:
	.globl	sk_load_byte_negative_offset
	sk_negative_common(1)
	movzbl	(%rax), %eax
	ret

bpf_slow_path_byte_msh_neg:
	cmp	SKF_MAX_NEG_OFF, %esi
	jl	bpf_error
sk_load_byte_msh_negative_offset:
	.globl	sk_load_byte_msh_negative_offset
	xchg	%eax,%ebx /* don't lose A, X is about to be scratched */
	sk_negative_common(1)
	movzbl	(%rax),%eax
	and	$15,%al
	shl	$2,%al
	xchg	%eax,%ebx
	ret

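/*
 * Shared error exit: the JIT-generated prologue is assumed to have
 * saved %rbx at -8(%rbp) (see bpf_jit_comp.c); restore it, tear down
 * the frame and return 0, i.e. "drop this packet" for the BPF filter.
 */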
bpf_error:
	# force a return 0 from jit handler
	xor	%eax,%eax
	mov	-8(%rbp),%rbx
	leaveq
	ret