net/core/filter.c
1 /*
2 * Linux Socket Filter - Kernel level socket filtering
3 *
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
6 *
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8 *
9 * Authors:
10 *
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22 */
23
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/fcntl.h>
28 #include <linux/socket.h>
29 #include <linux/in.h>
30 #include <linux/inet.h>
31 #include <linux/netdevice.h>
32 #include <linux/if_packet.h>
33 #include <linux/gfp.h>
34 #include <net/ip.h>
35 #include <net/protocol.h>
36 #include <net/netlink.h>
37 #include <linux/skbuff.h>
38 #include <net/sock.h>
39 #include <linux/errno.h>
40 #include <linux/timer.h>
41 #include <asm/uaccess.h>
42 #include <asm/unaligned.h>
43 #include <linux/filter.h>
44 #include <linux/ratelimit.h>
45 #include <linux/seccomp.h>
46 #include <linux/if_vlan.h>
47
48 /**
49 * sk_filter - run a packet through a socket filter
50 * @sk: sock associated with &sk_buff
51 * @skb: buffer to filter
52 *
53 * Run the filter code and then trim skb->data to the size returned by
54 * SK_RUN_FILTER. If pkt_len is 0 we toss the packet. If skb->len is smaller
55 * than pkt_len we keep the whole skb->data. This is the socket level
56 * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
57 * be accepted or -EPERM if the packet should be tossed.
58 *
59 */
60 int sk_filter(struct sock *sk, struct sk_buff *skb)
61 {
62 int err;
63 struct sk_filter *filter;
64
65 /*
66 * If the skb was allocated from pfmemalloc reserves, only
67 * allow SOCK_MEMALLOC sockets to use it as this socket is
68 * helping free memory
69 */
70 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
71 return -ENOMEM;
72
73 err = security_sock_rcv_skb(sk, skb);
74 if (err)
75 return err;
76
77 rcu_read_lock();
78 filter = rcu_dereference(sk->sk_filter);
79 if (filter) {
80 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
81
82 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
83 }
84 rcu_read_unlock();
85
86 return err;
87 }
88 EXPORT_SYMBOL(sk_filter);
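
/* Illustrative sketch (hypothetical caller, not part of this file): protocol
 * receive paths run sk_filter() before queueing a packet to the socket.
 * A caller typically does something like:
 *
 *	if (sk_filter(sk, skb))
 *		goto drop;	// rejected by the filter (or an error occurred)
 *	// on success, skb may have been trimmed to the length the filter returned
 */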
89
90 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
91 {
92 return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
93 }
94
95 static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
96 {
97 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
98 struct nlattr *nla;
99
100 if (skb_is_nonlinear(skb))
101 return 0;
102
103 if (skb->len < sizeof(struct nlattr))
104 return 0;
105
106 if (a > skb->len - sizeof(struct nlattr))
107 return 0;
108
109 nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
110 if (nla)
111 return (void *) nla - (void *) skb->data;
112
113 return 0;
114 }
115
116 static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
117 {
118 struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
119 struct nlattr *nla;
120
121 if (skb_is_nonlinear(skb))
122 return 0;
123
124 if (skb->len < sizeof(struct nlattr))
125 return 0;
126
127 if (a > skb->len - sizeof(struct nlattr))
128 return 0;
129
130 nla = (struct nlattr *) &skb->data[a];
131 if (nla->nla_len > skb->len - a)
132 return 0;
133
134 nla = nla_find_nested(nla, x);
135 if (nla)
136 return (void *) nla - (void *) skb->data;
137
138 return 0;
139 }
140
141 static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
142 {
143 return raw_smp_processor_id();
144 }
145
146 /* note that this only generates 32-bit random numbers */
147 static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
148 {
149 return prandom_u32();
150 }
151
152 static bool convert_bpf_extensions(struct sock_filter *fp,
153 struct bpf_insn **insnp)
154 {
155 struct bpf_insn *insn = *insnp;
156
157 switch (fp->k) {
158 case SKF_AD_OFF + SKF_AD_PROTOCOL:
159 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
160
161 /* A = *(u16 *) (CTX + offsetof(protocol)) */
162 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
163 offsetof(struct sk_buff, protocol));
164 /* A = ntohs(A) [emitting a nop or swap16] */
165 *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
166 break;
167
168 case SKF_AD_OFF + SKF_AD_PKTTYPE:
169 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
170 PKT_TYPE_OFFSET());
171 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
172 #ifdef __BIG_ENDIAN_BITFIELD
173 insn++;
174 *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5);
175 #endif
176 break;
177
178 case SKF_AD_OFF + SKF_AD_IFINDEX:
179 case SKF_AD_OFF + SKF_AD_HATYPE:
180 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
181 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
182 BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
183
184 *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
185 BPF_REG_TMP, BPF_REG_CTX,
186 offsetof(struct sk_buff, dev));
187 /* if (tmp != 0) goto pc + 1 */
188 *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
189 *insn++ = BPF_EXIT_INSN();
190 if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
191 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
192 offsetof(struct net_device, ifindex));
193 else
194 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
195 offsetof(struct net_device, type));
196 break;
197
198 case SKF_AD_OFF + SKF_AD_MARK:
199 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
200
201 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
202 offsetof(struct sk_buff, mark));
203 break;
204
205 case SKF_AD_OFF + SKF_AD_RXHASH:
206 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
207
208 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
209 offsetof(struct sk_buff, hash));
210 break;
211
212 case SKF_AD_OFF + SKF_AD_QUEUE:
213 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
214
215 *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
216 offsetof(struct sk_buff, queue_mapping));
217 break;
218
219 case SKF_AD_OFF + SKF_AD_VLAN_TAG:
220 case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
221 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
222 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
223
224 /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */
225 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
226 offsetof(struct sk_buff, vlan_tci));
227 if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
228 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
229 ~VLAN_TAG_PRESENT);
230 } else {
231 /* A >>= 12 */
232 *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
233 /* A &= 1 */
234 *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
235 }
236 break;
237
238 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
239 case SKF_AD_OFF + SKF_AD_NLATTR:
240 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
241 case SKF_AD_OFF + SKF_AD_CPU:
242 case SKF_AD_OFF + SKF_AD_RANDOM:
243 /* arg1 = CTX */
244 *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
245 /* arg2 = A */
246 *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
247 /* arg3 = X */
248 *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
249 /* Emit call(arg1=CTX, arg2=A, arg3=X) */
250 switch (fp->k) {
251 case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
252 *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
253 break;
254 case SKF_AD_OFF + SKF_AD_NLATTR:
255 *insn = BPF_EMIT_CALL(__skb_get_nlattr);
256 break;
257 case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
258 *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
259 break;
260 case SKF_AD_OFF + SKF_AD_CPU:
261 *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
262 break;
263 case SKF_AD_OFF + SKF_AD_RANDOM:
264 *insn = BPF_EMIT_CALL(__get_random_u32);
265 break;
266 }
267 break;
268
269 case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
270 /* A ^= X */
271 *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
272 break;
273
274 default:
275 /* This is just a dummy call to avoid letting the compiler
276 * evict __bpf_call_base() as an optimization. Placed here
277 * where no-one bothers.
278 */
279 BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
280 return false;
281 }
282
283 *insnp = insn;
284 return true;
285 }
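
/* Illustrative sketch, assuming a classic program that contains an ancillary
 * load such as:
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * convert_bpf_extensions() rewrites that single slot into roughly:
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
 *	BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
 *	BPF_EMIT_CALL(__get_raw_cpu_id);
 *
 * The helper's return value lands in R0, which is the accumulator A in this
 * mapping, so no extra move is needed afterwards.
 */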
286
287 /**
288 * bpf_convert_filter - convert filter program
289 * @prog: the user passed filter program
290 * @len: the length of the user passed filter program
291 * @new_prog: buffer where converted program will be stored
292 * @new_len: pointer to store length of converted program
293 *
294 * Remap 'sock_filter' style BPF instruction set to 'bpf_insn' style.
295 * Conversion workflow:
296 *
297 * 1) First pass for calculating the new program length:
298 * bpf_convert_filter(old_prog, old_len, NULL, &new_len)
299 *
300 * 2) 2nd call to remap, which itself runs in two passes: the 1st pass
301 * finds the new jump offsets, the 2nd pass does the remapping:
302 * new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
303 * bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
304 *
305 * User BPF's register A is mapped to eBPF register R0 (BPF_REG_A), user
306 * BPF register X is mapped to R7 (BPF_REG_X); the frame pointer is always
307 * R10. The context 'void *ctx' arrives in R1 and is saved in BPF_REG_CTX,
308 * that is, for socket filters: ctx == 'struct sk_buff *', for seccomp:
309 * ctx == 'struct seccomp_data *'.
310 */
311 int bpf_convert_filter(struct sock_filter *prog, int len,
312 struct bpf_insn *new_prog, int *new_len)
313 {
314 int new_flen = 0, pass = 0, target, i;
315 struct bpf_insn *new_insn;
316 struct sock_filter *fp;
317 int *addrs = NULL;
318 u8 bpf_src;
319
320 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
321 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
322
323 if (len <= 0 || len > BPF_MAXINSNS)
324 return -EINVAL;
325
326 if (new_prog) {
327 addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
328 if (!addrs)
329 return -ENOMEM;
330 }
331
332 do_pass:
333 new_insn = new_prog;
334 fp = prog;
335
336 if (new_insn)
337 *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
338 new_insn++;
339
340 for (i = 0; i < len; fp++, i++) {
341 struct bpf_insn tmp_insns[6] = { };
342 struct bpf_insn *insn = tmp_insns;
343
344 if (addrs)
345 addrs[i] = new_insn - new_prog;
346
347 switch (fp->code) {
348 /* All arithmetic insns and skb loads map as-is. */
349 case BPF_ALU | BPF_ADD | BPF_X:
350 case BPF_ALU | BPF_ADD | BPF_K:
351 case BPF_ALU | BPF_SUB | BPF_X:
352 case BPF_ALU | BPF_SUB | BPF_K:
353 case BPF_ALU | BPF_AND | BPF_X:
354 case BPF_ALU | BPF_AND | BPF_K:
355 case BPF_ALU | BPF_OR | BPF_X:
356 case BPF_ALU | BPF_OR | BPF_K:
357 case BPF_ALU | BPF_LSH | BPF_X:
358 case BPF_ALU | BPF_LSH | BPF_K:
359 case BPF_ALU | BPF_RSH | BPF_X:
360 case BPF_ALU | BPF_RSH | BPF_K:
361 case BPF_ALU | BPF_XOR | BPF_X:
362 case BPF_ALU | BPF_XOR | BPF_K:
363 case BPF_ALU | BPF_MUL | BPF_X:
364 case BPF_ALU | BPF_MUL | BPF_K:
365 case BPF_ALU | BPF_DIV | BPF_X:
366 case BPF_ALU | BPF_DIV | BPF_K:
367 case BPF_ALU | BPF_MOD | BPF_X:
368 case BPF_ALU | BPF_MOD | BPF_K:
369 case BPF_ALU | BPF_NEG:
370 case BPF_LD | BPF_ABS | BPF_W:
371 case BPF_LD | BPF_ABS | BPF_H:
372 case BPF_LD | BPF_ABS | BPF_B:
373 case BPF_LD | BPF_IND | BPF_W:
374 case BPF_LD | BPF_IND | BPF_H:
375 case BPF_LD | BPF_IND | BPF_B:
376 /* Check for overloaded BPF extension and
377 * directly convert it if found, otherwise
378 * just move on with mapping.
379 */
380 if (BPF_CLASS(fp->code) == BPF_LD &&
381 BPF_MODE(fp->code) == BPF_ABS &&
382 convert_bpf_extensions(fp, &insn))
383 break;
384
385 *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
386 break;
387
388 /* Jump transformation cannot use BPF block macros
389 * everywhere as offset calculation and target updates
390 * require a bit more work than the rest, i.e. jump
391 * opcodes map as-is, but offsets need adjustment.
392 */
393
394 #define BPF_EMIT_JMP \
395 do { \
396 if (target >= len || target < 0) \
397 goto err; \
398 insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
399 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
400 insn->off -= insn - tmp_insns; \
401 } while (0)
402
403 case BPF_JMP | BPF_JA:
404 target = i + fp->k + 1;
405 insn->code = fp->code;
406 BPF_EMIT_JMP;
407 break;
408
409 case BPF_JMP | BPF_JEQ | BPF_K:
410 case BPF_JMP | BPF_JEQ | BPF_X:
411 case BPF_JMP | BPF_JSET | BPF_K:
412 case BPF_JMP | BPF_JSET | BPF_X:
413 case BPF_JMP | BPF_JGT | BPF_K:
414 case BPF_JMP | BPF_JGT | BPF_X:
415 case BPF_JMP | BPF_JGE | BPF_K:
416 case BPF_JMP | BPF_JGE | BPF_X:
417 if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
418 /* BPF immediates are signed, zero extend
419 * immediate into tmp register and use it
420 * in compare insn.
421 */
422 *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
423
424 insn->dst_reg = BPF_REG_A;
425 insn->src_reg = BPF_REG_TMP;
426 bpf_src = BPF_X;
427 } else {
428 insn->dst_reg = BPF_REG_A;
429 insn->src_reg = BPF_REG_X;
430 insn->imm = fp->k;
431 bpf_src = BPF_SRC(fp->code);
432 }
433
434 /* Common case where 'jump_false' is next insn. */
435 if (fp->jf == 0) {
436 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
437 target = i + fp->jt + 1;
438 BPF_EMIT_JMP;
439 break;
440 }
441
442 /* Convert JEQ into JNE when 'jump_true' is next insn. */
443 if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
444 insn->code = BPF_JMP | BPF_JNE | bpf_src;
445 target = i + fp->jf + 1;
446 BPF_EMIT_JMP;
447 break;
448 }
449
450 /* Other jumps are mapped into two insns: Jxx and JA. */
451 target = i + fp->jt + 1;
452 insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
453 BPF_EMIT_JMP;
454 insn++;
455
456 insn->code = BPF_JMP | BPF_JA;
457 target = i + fp->jf + 1;
458 BPF_EMIT_JMP;
459 break;
460
461 /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
462 case BPF_LDX | BPF_MSH | BPF_B:
463 /* tmp = A */
464 *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
465 /* A = BPF_R0 = *(u8 *) (skb->data + K) */
466 *insn++ = BPF_LD_ABS(BPF_B, fp->k);
467 /* A &= 0xf */
468 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
469 /* A <<= 2 */
470 *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
471 /* X = A */
472 *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
473 /* A = tmp */
474 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
475 break;
476
477 /* RET_K, RET_A are remapped into 2 insns. */
478 case BPF_RET | BPF_A:
479 case BPF_RET | BPF_K:
480 *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
481 BPF_K : BPF_X, BPF_REG_0,
482 BPF_REG_A, fp->k);
483 *insn = BPF_EXIT_INSN();
484 break;
485
486 /* Store to stack. */
487 case BPF_ST:
488 case BPF_STX:
489 *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
490 BPF_ST ? BPF_REG_A : BPF_REG_X,
491 -(BPF_MEMWORDS - fp->k) * 4);
492 break;
493
494 /* Load from stack. */
495 case BPF_LD | BPF_MEM:
496 case BPF_LDX | BPF_MEM:
497 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
498 BPF_REG_A : BPF_REG_X, BPF_REG_FP,
499 -(BPF_MEMWORDS - fp->k) * 4);
500 break;
501
502 /* A = K or X = K */
503 case BPF_LD | BPF_IMM:
504 case BPF_LDX | BPF_IMM:
505 *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
506 BPF_REG_A : BPF_REG_X, fp->k);
507 break;
508
509 /* X = A */
510 case BPF_MISC | BPF_TAX:
511 *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
512 break;
513
514 /* A = X */
515 case BPF_MISC | BPF_TXA:
516 *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
517 break;
518
519 /* A = skb->len or X = skb->len */
520 case BPF_LD | BPF_W | BPF_LEN:
521 case BPF_LDX | BPF_W | BPF_LEN:
522 *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
523 BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
524 offsetof(struct sk_buff, len));
525 break;
526
527 /* Access seccomp_data fields. */
528 case BPF_LDX | BPF_ABS | BPF_W:
529 /* A = *(u32 *) (ctx + K) */
530 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
531 break;
532
533 /* Unknown instruction. */
534 default:
535 goto err;
536 }
537
538 insn++;
539 if (new_prog)
540 memcpy(new_insn, tmp_insns,
541 sizeof(*insn) * (insn - tmp_insns));
542 new_insn += insn - tmp_insns;
543 }
544
545 if (!new_prog) {
546 /* Only calculating new length. */
547 *new_len = new_insn - new_prog;
548 return 0;
549 }
550
551 pass++;
552 if (new_flen != new_insn - new_prog) {
553 new_flen = new_insn - new_prog;
554 if (pass > 2)
555 goto err;
556 goto do_pass;
557 }
558
559 kfree(addrs);
560 BUG_ON(*new_len != new_flen);
561 return 0;
562 err:
563 kfree(addrs);
564 return -EINVAL;
565 }
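
/* Minimal sketch of the calling convention documented above (hypothetical
 * caller; bpf_migrate_filter() below is the in-tree user):
 *
 *	int new_len, err;
 *	struct bpf_insn *new_prog;
 *
 *	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 *	if (err)
 *		return err;
 *	new_prog = kmalloc(sizeof(struct bpf_insn) * new_len, GFP_KERNEL);
 *	if (!new_prog)
 *		return -ENOMEM;
 *	err = bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */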
566
567 /* Security:
568 *
569 * As we don't want to clear the mem[] array for each packet going through
570 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
571 * read a cell that was not previously written, and we check all branches
572 * to be sure a malicious user doesn't try to abuse us.
573 */
574 static int check_load_and_stores(const struct sock_filter *filter, int flen)
575 {
576 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
577 int pc, ret = 0;
578
579 BUILD_BUG_ON(BPF_MEMWORDS > 16);
580
581 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
582 if (!masks)
583 return -ENOMEM;
584
585 memset(masks, 0xff, flen * sizeof(*masks));
586
587 for (pc = 0; pc < flen; pc++) {
588 memvalid &= masks[pc];
589
590 switch (filter[pc].code) {
591 case BPF_ST:
592 case BPF_STX:
593 memvalid |= (1 << filter[pc].k);
594 break;
595 case BPF_LD | BPF_MEM:
596 case BPF_LDX | BPF_MEM:
597 if (!(memvalid & (1 << filter[pc].k))) {
598 ret = -EINVAL;
599 goto error;
600 }
601 break;
602 case BPF_JMP | BPF_JA:
603 /* A jump must set masks on target */
604 masks[pc + 1 + filter[pc].k] &= memvalid;
605 memvalid = ~0;
606 break;
607 case BPF_JMP | BPF_JEQ | BPF_K:
608 case BPF_JMP | BPF_JEQ | BPF_X:
609 case BPF_JMP | BPF_JGE | BPF_K:
610 case BPF_JMP | BPF_JGE | BPF_X:
611 case BPF_JMP | BPF_JGT | BPF_K:
612 case BPF_JMP | BPF_JGT | BPF_X:
613 case BPF_JMP | BPF_JSET | BPF_K:
614 case BPF_JMP | BPF_JSET | BPF_X:
615 /* A jump must set masks on targets */
616 masks[pc + 1 + filter[pc].jt] &= memvalid;
617 masks[pc + 1 + filter[pc].jf] &= memvalid;
618 memvalid = ~0;
619 break;
620 }
621 }
622 error:
623 kfree(masks);
624 return ret;
625 }
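
/* Illustrative sketch: check_load_and_stores() rejects a classic program
 * that reads a scratch cell before writing it, e.g. (hypothetical filter):
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),		// load M[0], never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *
 * The per-cell bit in 'memvalid' is still clear at the load, so the
 * function returns -EINVAL.
 */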
626
627 static bool chk_code_allowed(u16 code_to_probe)
628 {
629 static const bool codes[] = {
630 /* 32 bit ALU operations */
631 [BPF_ALU | BPF_ADD | BPF_K] = true,
632 [BPF_ALU | BPF_ADD | BPF_X] = true,
633 [BPF_ALU | BPF_SUB | BPF_K] = true,
634 [BPF_ALU | BPF_SUB | BPF_X] = true,
635 [BPF_ALU | BPF_MUL | BPF_K] = true,
636 [BPF_ALU | BPF_MUL | BPF_X] = true,
637 [BPF_ALU | BPF_DIV | BPF_K] = true,
638 [BPF_ALU | BPF_DIV | BPF_X] = true,
639 [BPF_ALU | BPF_MOD | BPF_K] = true,
640 [BPF_ALU | BPF_MOD | BPF_X] = true,
641 [BPF_ALU | BPF_AND | BPF_K] = true,
642 [BPF_ALU | BPF_AND | BPF_X] = true,
643 [BPF_ALU | BPF_OR | BPF_K] = true,
644 [BPF_ALU | BPF_OR | BPF_X] = true,
645 [BPF_ALU | BPF_XOR | BPF_K] = true,
646 [BPF_ALU | BPF_XOR | BPF_X] = true,
647 [BPF_ALU | BPF_LSH | BPF_K] = true,
648 [BPF_ALU | BPF_LSH | BPF_X] = true,
649 [BPF_ALU | BPF_RSH | BPF_K] = true,
650 [BPF_ALU | BPF_RSH | BPF_X] = true,
651 [BPF_ALU | BPF_NEG] = true,
652 /* Load instructions */
653 [BPF_LD | BPF_W | BPF_ABS] = true,
654 [BPF_LD | BPF_H | BPF_ABS] = true,
655 [BPF_LD | BPF_B | BPF_ABS] = true,
656 [BPF_LD | BPF_W | BPF_LEN] = true,
657 [BPF_LD | BPF_W | BPF_IND] = true,
658 [BPF_LD | BPF_H | BPF_IND] = true,
659 [BPF_LD | BPF_B | BPF_IND] = true,
660 [BPF_LD | BPF_IMM] = true,
661 [BPF_LD | BPF_MEM] = true,
662 [BPF_LDX | BPF_W | BPF_LEN] = true,
663 [BPF_LDX | BPF_B | BPF_MSH] = true,
664 [BPF_LDX | BPF_IMM] = true,
665 [BPF_LDX | BPF_MEM] = true,
666 /* Store instructions */
667 [BPF_ST] = true,
668 [BPF_STX] = true,
669 /* Misc instructions */
670 [BPF_MISC | BPF_TAX] = true,
671 [BPF_MISC | BPF_TXA] = true,
672 /* Return instructions */
673 [BPF_RET | BPF_K] = true,
674 [BPF_RET | BPF_A] = true,
675 /* Jump instructions */
676 [BPF_JMP | BPF_JA] = true,
677 [BPF_JMP | BPF_JEQ | BPF_K] = true,
678 [BPF_JMP | BPF_JEQ | BPF_X] = true,
679 [BPF_JMP | BPF_JGE | BPF_K] = true,
680 [BPF_JMP | BPF_JGE | BPF_X] = true,
681 [BPF_JMP | BPF_JGT | BPF_K] = true,
682 [BPF_JMP | BPF_JGT | BPF_X] = true,
683 [BPF_JMP | BPF_JSET | BPF_K] = true,
684 [BPF_JMP | BPF_JSET | BPF_X] = true,
685 };
686
687 if (code_to_probe >= ARRAY_SIZE(codes))
688 return false;
689
690 return codes[code_to_probe];
691 }
692
693 /**
694 * bpf_check_classic - verify socket filter code
695 * @filter: filter to verify
696 * @flen: length of filter
697 *
698 * Check the user's filter code. If we let some ugly
699 * filter code slip through, kaboom! The filter must contain
700 * no references or jumps that are out of range, no illegal
701 * instructions, and must end with a RET instruction.
702 *
703 * All jumps are forward as they are not signed.
704 *
705 * Returns 0 if the rule set is legal or -EINVAL if not.
706 */
707 int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
708 {
709 bool anc_found;
710 int pc;
711
712 if (flen == 0 || flen > BPF_MAXINSNS)
713 return -EINVAL;
714
715 /* Check the filter code now */
716 for (pc = 0; pc < flen; pc++) {
717 const struct sock_filter *ftest = &filter[pc];
718
719 /* May we actually operate on this code? */
720 if (!chk_code_allowed(ftest->code))
721 return -EINVAL;
722
723 /* Some instructions need special checks */
724 switch (ftest->code) {
725 case BPF_ALU | BPF_DIV | BPF_K:
726 case BPF_ALU | BPF_MOD | BPF_K:
727 /* Check for division by zero */
728 if (ftest->k == 0)
729 return -EINVAL;
730 break;
731 case BPF_LD | BPF_MEM:
732 case BPF_LDX | BPF_MEM:
733 case BPF_ST:
734 case BPF_STX:
735 /* Check for invalid memory addresses */
736 if (ftest->k >= BPF_MEMWORDS)
737 return -EINVAL;
738 break;
739 case BPF_JMP | BPF_JA:
740 /* Note, the large ftest->k might cause loops.
741 * Compare this with conditional jumps below,
742 * where offsets are limited. --ANK (981016)
743 */
744 if (ftest->k >= (unsigned int)(flen - pc - 1))
745 return -EINVAL;
746 break;
747 case BPF_JMP | BPF_JEQ | BPF_K:
748 case BPF_JMP | BPF_JEQ | BPF_X:
749 case BPF_JMP | BPF_JGE | BPF_K:
750 case BPF_JMP | BPF_JGE | BPF_X:
751 case BPF_JMP | BPF_JGT | BPF_K:
752 case BPF_JMP | BPF_JGT | BPF_X:
753 case BPF_JMP | BPF_JSET | BPF_K:
754 case BPF_JMP | BPF_JSET | BPF_X:
755 /* Both conditionals must be safe */
756 if (pc + ftest->jt + 1 >= flen ||
757 pc + ftest->jf + 1 >= flen)
758 return -EINVAL;
759 break;
760 case BPF_LD | BPF_W | BPF_ABS:
761 case BPF_LD | BPF_H | BPF_ABS:
762 case BPF_LD | BPF_B | BPF_ABS:
763 anc_found = false;
764 if (bpf_anc_helper(ftest) & BPF_ANC)
765 anc_found = true;
766 /* Ancillary operation unknown or unsupported */
767 if (anc_found == false && ftest->k >= SKF_AD_OFF)
768 return -EINVAL;
769 }
770 }
771
772 /* Last instruction must be a RET code */
773 switch (filter[flen - 1].code) {
774 case BPF_RET | BPF_K:
775 case BPF_RET | BPF_A:
776 return check_load_and_stores(filter, flen);
777 }
778
779 return -EINVAL;
780 }
781 EXPORT_SYMBOL(bpf_check_classic);
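
/* Illustrative sketch: the smallest program that passes bpf_check_classic()
 * is a single unconditional return, e.g. an accept-all filter
 * (hypothetical example):
 *
 *	struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	// accept, max snap length
 *	};
 *
 * It uses an allowed opcode, has no out-of-range jumps or memory references,
 * and ends with a RET, so the checks above all succeed.
 */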
782
783 static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
784 const struct sock_fprog *fprog)
785 {
786 unsigned int fsize = bpf_classic_proglen(fprog);
787 struct sock_fprog_kern *fkprog;
788
789 fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
790 if (!fp->orig_prog)
791 return -ENOMEM;
792
793 fkprog = fp->orig_prog;
794 fkprog->len = fprog->len;
795 fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
796 if (!fkprog->filter) {
797 kfree(fp->orig_prog);
798 return -ENOMEM;
799 }
800
801 return 0;
802 }
803
804 static void bpf_release_orig_filter(struct bpf_prog *fp)
805 {
806 struct sock_fprog_kern *fprog = fp->orig_prog;
807
808 if (fprog) {
809 kfree(fprog->filter);
810 kfree(fprog);
811 }
812 }
813
814 static void __bpf_prog_release(struct bpf_prog *prog)
815 {
816 bpf_release_orig_filter(prog);
817 bpf_prog_free(prog);
818 }
819
820 static void __sk_filter_release(struct sk_filter *fp)
821 {
822 __bpf_prog_release(fp->prog);
823 kfree(fp);
824 }
825
826 /**
827 * sk_filter_release_rcu - Release a socket filter by rcu_head
828 * @rcu: rcu_head that contains the sk_filter to free
829 */
830 static void sk_filter_release_rcu(struct rcu_head *rcu)
831 {
832 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
833
834 __sk_filter_release(fp);
835 }
836
837 /**
838 * sk_filter_release - release a socket filter
839 * @fp: filter to remove
840 *
841 * Remove a filter from a socket and release its resources.
842 */
843 static void sk_filter_release(struct sk_filter *fp)
844 {
845 if (atomic_dec_and_test(&fp->refcnt))
846 call_rcu(&fp->rcu, sk_filter_release_rcu);
847 }
848
849 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
850 {
851 u32 filter_size = bpf_prog_size(fp->prog->len);
852
853 atomic_sub(filter_size, &sk->sk_omem_alloc);
854 sk_filter_release(fp);
855 }
856
857 /* Try to charge the socket memory if there is space available;
858 * return true on success.
859 */
860 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
861 {
862 u32 filter_size = bpf_prog_size(fp->prog->len);
863
864 /* same check as in sock_kmalloc() */
865 if (filter_size <= sysctl_optmem_max &&
866 atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
867 atomic_inc(&fp->refcnt);
868 atomic_add(filter_size, &sk->sk_omem_alloc);
869 return true;
870 }
871 return false;
872 }
873
874 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
875 {
876 struct sock_filter *old_prog;
877 struct bpf_prog *old_fp;
878 int err, new_len, old_len = fp->len;
879
880 /* We are free to overwrite insns et al right here as they
881 * won't be used anymore internally
882 * after the migration to the internal BPF instruction
883 * representation.
884 */
885 BUILD_BUG_ON(sizeof(struct sock_filter) !=
886 sizeof(struct bpf_insn));
887
888 /* Conversion cannot happen on overlapping memory areas,
889 * so we need to keep the user BPF around until the 2nd
890 * pass. At this time, the user BPF is stored in fp->insns.
891 */
892 old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
893 GFP_KERNEL);
894 if (!old_prog) {
895 err = -ENOMEM;
896 goto out_err;
897 }
898
899 /* 1st pass: calculate the new program length. */
900 err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
901 if (err)
902 goto out_err_free;
903
904 /* Expand fp for appending the new filter representation. */
905 old_fp = fp;
906 fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
907 if (!fp) {
908 /* The old_fp is still around in case we couldn't
909 * allocate new memory, so uncharge on that one.
910 */
911 fp = old_fp;
912 err = -ENOMEM;
913 goto out_err_free;
914 }
915
916 fp->len = new_len;
917
918 /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
919 err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
920 if (err)
921 /* 2nd bpf_convert_filter() can fail only if it fails
922 * to allocate memory, remapping must succeed. Note,
923 * that at this time old_fp has already been released
924 * by bpf_prog_realloc().
925 */
926 goto out_err_free;
927
928 bpf_prog_select_runtime(fp);
929
930 kfree(old_prog);
931 return fp;
932
933 out_err_free:
934 kfree(old_prog);
935 out_err:
936 __bpf_prog_release(fp);
937 return ERR_PTR(err);
938 }
939
940 static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
941 {
942 int err;
943
944 fp->bpf_func = NULL;
945 fp->jited = false;
946
947 err = bpf_check_classic(fp->insns, fp->len);
948 if (err) {
949 __bpf_prog_release(fp);
950 return ERR_PTR(err);
951 }
952
953 /* Probe if we can JIT compile the filter and if so, do
954 * the compilation of the filter.
955 */
956 bpf_jit_compile(fp);
957
958 /* JIT compiler couldn't process this filter, so do the
959 * internal BPF translation for the optimized interpreter.
960 */
961 if (!fp->jited)
962 fp = bpf_migrate_filter(fp);
963
964 return fp;
965 }
966
967 /**
968 * bpf_prog_create - create an unattached filter
969 * @pfp: the unattached filter that is created
970 * @fprog: the filter program
971 *
972 * Create a filter independent of any socket. We first run some
973 * sanity checks on it to make sure it does not explode on us later.
974 * If an error occurs or there is insufficient memory for the filter,
975 * a negative errno code is returned. On success the return is zero.
976 */
977 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
978 {
979 unsigned int fsize = bpf_classic_proglen(fprog);
980 struct bpf_prog *fp;
981
982 /* Make sure the new filter is actually there. */
983 if (fprog->filter == NULL)
984 return -EINVAL;
985
986 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
987 if (!fp)
988 return -ENOMEM;
989
990 memcpy(fp->insns, fprog->filter, fsize);
991
992 fp->len = fprog->len;
993 /* Since unattached filters are not copied back to user
994 * space through sk_get_filter(), we do not need to hold
995 * a copy here, and can spare us the work.
996 */
997 fp->orig_prog = NULL;
998
999 /* bpf_prepare_filter() already takes care of freeing
1000 * memory in case something goes wrong.
1001 */
1002 fp = bpf_prepare_filter(fp);
1003 if (IS_ERR(fp))
1004 return PTR_ERR(fp);
1005
1006 *pfp = fp;
1007 return 0;
1008 }
1009 EXPORT_SYMBOL_GPL(bpf_prog_create);
1010
1011 void bpf_prog_destroy(struct bpf_prog *fp)
1012 {
1013 __bpf_prog_release(fp);
1014 }
1015 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
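
/* Minimal sketch of the unattached-filter lifecycle (hypothetical in-kernel
 * user; 'insns' and 'skb' are assumed to exist for illustration):
 *
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err;
 *
 *	err = bpf_prog_create(&prog, &fprog);
 *	if (err)
 *		return err;
 *	if (BPF_PROG_RUN(prog, skb) == 0)	// zero means "drop"
 *		kfree_skb(skb);
 *	bpf_prog_destroy(prog);
 */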
1016
1017 /**
1018 * sk_attach_filter - attach a socket filter
1019 * @fprog: the filter program
1020 * @sk: the socket to use
1021 *
1022 * Attach the user's filter code. We first run some sanity checks on
1023 * it to make sure it does not explode on us later. If an error
1024 * occurs or there is insufficient memory for the filter, a negative
1025 * errno code is returned. On success the return is zero.
1026 */
1027 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1028 {
1029 struct sk_filter *fp, *old_fp;
1030 unsigned int fsize = bpf_classic_proglen(fprog);
1031 unsigned int bpf_fsize = bpf_prog_size(fprog->len);
1032 struct bpf_prog *prog;
1033 int err;
1034
1035 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1036 return -EPERM;
1037
1038 /* Make sure the new filter is actually there. */
1039 if (fprog->filter == NULL)
1040 return -EINVAL;
1041
1042 prog = bpf_prog_alloc(bpf_fsize, 0);
1043 if (!prog)
1044 return -ENOMEM;
1045
1046 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1047 __bpf_prog_free(prog);
1048 return -EFAULT;
1049 }
1050
1051 prog->len = fprog->len;
1052
1053 err = bpf_prog_store_orig_filter(prog, fprog);
1054 if (err) {
1055 __bpf_prog_free(prog);
1056 return -ENOMEM;
1057 }
1058
1059 /* bpf_prepare_filter() already takes care of freeing
1060 * memory in case something goes wrong.
1061 */
1062 prog = bpf_prepare_filter(prog);
1063 if (IS_ERR(prog))
1064 return PTR_ERR(prog);
1065
1066 fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1067 if (!fp) {
1068 __bpf_prog_release(prog);
1069 return -ENOMEM;
1070 }
1071 fp->prog = prog;
1072
1073 atomic_set(&fp->refcnt, 0);
1074
1075 if (!sk_filter_charge(sk, fp)) {
1076 __sk_filter_release(fp);
1077 return -ENOMEM;
1078 }
1079
1080 old_fp = rcu_dereference_protected(sk->sk_filter,
1081 sock_owned_by_user(sk));
1082 rcu_assign_pointer(sk->sk_filter, fp);
1083
1084 if (old_fp)
1085 sk_filter_uncharge(sk, old_fp);
1086
1087 return 0;
1088 }
1089 EXPORT_SYMBOL_GPL(sk_attach_filter);
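
/* Illustrative sketch: sk_attach_filter() is reached from user space via
 * setsockopt(). A hypothetical user-space caller looks roughly like:
 *
 *	struct sock_filter code[] = { ... };
 *	struct sock_fprog bpf = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 *
 * sock_setsockopt() copies the sock_fprog header from user space and then
 * calls into sk_attach_filter() with the socket locked.
 */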
1090
1091 int sk_detach_filter(struct sock *sk)
1092 {
1093 int ret = -ENOENT;
1094 struct sk_filter *filter;
1095
1096 if (sock_flag(sk, SOCK_FILTER_LOCKED))
1097 return -EPERM;
1098
1099 filter = rcu_dereference_protected(sk->sk_filter,
1100 sock_owned_by_user(sk));
1101 if (filter) {
1102 RCU_INIT_POINTER(sk->sk_filter, NULL);
1103 sk_filter_uncharge(sk, filter);
1104 ret = 0;
1105 }
1106
1107 return ret;
1108 }
1109 EXPORT_SYMBOL_GPL(sk_detach_filter);
1110
1111 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1112 unsigned int len)
1113 {
1114 struct sock_fprog_kern *fprog;
1115 struct sk_filter *filter;
1116 int ret = 0;
1117
1118 lock_sock(sk);
1119 filter = rcu_dereference_protected(sk->sk_filter,
1120 sock_owned_by_user(sk));
1121 if (!filter)
1122 goto out;
1123
1124 /* We're copying the filter that has been originally attached,
1125 * so no conversion/decode needed anymore.
1126 */
1127 fprog = filter->prog->orig_prog;
1128
1129 ret = fprog->len;
1130 if (!len)
1131 /* User space only enquires about the number of filter blocks. */
1132 goto out;
1133
1134 ret = -EINVAL;
1135 if (len < fprog->len)
1136 goto out;
1137
1138 ret = -EFAULT;
1139 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
1140 goto out;
1141
1142 /* Instead of bytes, the API requests to return the number
1143 * of filter blocks.
1144 */
1145 ret = fprog->len;
1146 out:
1147 release_sock(sk);
1148 return ret;
1149 }
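
/* Illustrative sketch: SO_GET_FILTER follows the two-call convention
 * implemented above. A hypothetical user-space caller first passes a zero
 * length to learn the number of blocks, then retrieves them:
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);	// len = block count
 *	struct sock_filter *code = calloc(len, sizeof(*code));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, code, &len);
 */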