/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF)
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
        return NULL;
}
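
/*
 * A minimal illustration (not part of this file) of how the negative
 * offsets above are used from a classic BPF program: SKF_NET_OFF and
 * SKF_LL_OFF let a filter read relative to the network and link-layer
 * headers even when skb->data no longer points at them.
 *
 *	struct sock_filter prog[] = {
 *		// A <- byte at network header + 9 (IPv4 protocol field)
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *		// accept the whole packet if it is TCP, else drop it
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 */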

static inline void *load_pointer(const struct sk_buff *skb, int k,
                                 unsigned int size, void *buffer)
{
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
        return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

/**
 * sk_filter - run a packet through a socket filter
 * @sk: sock associated with &sk_buff
 * @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
                return -ENOMEM;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter);
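
/*
 * Sketch of the truncation contract above, assuming a filter whose
 * matching RET returns 96: sk_filter() trims an accepted skb to at
 * most that many bytes, the way a tcpdump-style snaplen does.
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),	// accept, keep <= 96 bytes
 *	};
 *
 * A RET of 0 makes sk_filter() return -EPERM and the caller frees the
 * packet; a RET larger than skb->len leaves the data untouched.
 */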

/**
 * sk_run_filter - run a filter on a socket
 * @skb: buffer to run the filter on
 * @fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need
 * to check flen. (We used to pass the length of the filter to this
 * function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
                           const struct sock_filter *fentry)
{
        void *ptr;
        u32 A = 0;                      /* Accumulator */
        u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
        u32 tmp;
        int k;

        /*
         * Process array of filter instructions.
         */
        for (;; fentry++) {
#if defined(CONFIG_X86_32)
/* On register-starved x86-32, a macro avoids dedicating a register to K. */
#define K (fentry->k)
#else
                const u32 K = fentry->k;
#endif

                switch (fentry->code) {
                case BPF_S_ALU_ADD_X:
                        A += X;
                        continue;
                case BPF_S_ALU_ADD_K:
                        A += K;
                        continue;
                case BPF_S_ALU_SUB_X:
                        A -= X;
                        continue;
                case BPF_S_ALU_SUB_K:
                        A -= K;
                        continue;
                case BPF_S_ALU_MUL_X:
                        A *= X;
                        continue;
                case BPF_S_ALU_MUL_K:
                        A *= K;
                        continue;
                case BPF_S_ALU_DIV_X:
                        if (X == 0)
                                return 0;
                        A /= X;
                        continue;
                case BPF_S_ALU_DIV_K:
                        A /= K;
                        continue;
                case BPF_S_ALU_MOD_X:
                        if (X == 0)
                                return 0;
                        A %= X;
                        continue;
                case BPF_S_ALU_MOD_K:
                        A %= K;
                        continue;
                case BPF_S_ALU_AND_X:
                        A &= X;
                        continue;
                case BPF_S_ALU_AND_K:
                        A &= K;
                        continue;
                case BPF_S_ALU_OR_X:
                        A |= X;
                        continue;
                case BPF_S_ALU_OR_K:
                        A |= K;
                        continue;
                case BPF_S_ANC_ALU_XOR_X:
                case BPF_S_ALU_XOR_X:
                        A ^= X;
                        continue;
                case BPF_S_ALU_XOR_K:
                        A ^= K;
                        continue;
                case BPF_S_ALU_LSH_X:
                        A <<= X;
                        continue;
                case BPF_S_ALU_LSH_K:
                        A <<= K;
                        continue;
                case BPF_S_ALU_RSH_X:
                        A >>= X;
                        continue;
                case BPF_S_ALU_RSH_K:
                        A >>= K;
                        continue;
                case BPF_S_ALU_NEG:
                        A = -A;
                        continue;
                case BPF_S_JMP_JA:
                        fentry += K;
                        continue;
                case BPF_S_JMP_JGT_K:
                        fentry += (A > K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_K:
                        fentry += (A >= K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_K:
                        fentry += (A == K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_K:
                        fentry += (A & K) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGT_X:
                        fentry += (A > X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JGE_X:
                        fentry += (A >= X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JEQ_X:
                        fentry += (A == X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_JMP_JSET_X:
                        fentry += (A & X) ? fentry->jt : fentry->jf;
                        continue;
                case BPF_S_LD_W_ABS:
                        k = K;
load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be32(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_H_ABS:
                        k = K;
load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = get_unaligned_be16(ptr);
                                continue;
                        }
                        return 0;
                case BPF_S_LD_B_ABS:
                        k = K;
load_b:
                        ptr = load_pointer(skb, k, 1, &tmp);
                        if (ptr != NULL) {
                                A = *(u8 *)ptr;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_W_LEN:
                        A = skb->len;
                        continue;
                case BPF_S_LDX_W_LEN:
                        X = skb->len;
                        continue;
                case BPF_S_LD_W_IND:
                        k = X + K;
                        goto load_w;
                case BPF_S_LD_H_IND:
                        k = X + K;
                        goto load_h;
                case BPF_S_LD_B_IND:
                        k = X + K;
                        goto load_b;
                case BPF_S_LDX_B_MSH:
                        ptr = load_pointer(skb, K, 1, &tmp);
                        if (ptr != NULL) {
                                X = (*(u8 *)ptr & 0xf) << 2;
                                continue;
                        }
                        return 0;
                case BPF_S_LD_IMM:
                        A = K;
                        continue;
                case BPF_S_LDX_IMM:
                        X = K;
                        continue;
                case BPF_S_LD_MEM:
                        A = mem[K];
                        continue;
                case BPF_S_LDX_MEM:
                        X = mem[K];
                        continue;
                case BPF_S_MISC_TAX:
                        X = A;
                        continue;
                case BPF_S_MISC_TXA:
                        A = X;
                        continue;
                case BPF_S_RET_K:
                        return K;
                case BPF_S_RET_A:
                        return A;
                case BPF_S_ST:
                        mem[K] = A;
                        continue;
                case BPF_S_STX:
                        mem[K] = X;
                        continue;
                case BPF_S_ANC_PROTOCOL:
                        A = ntohs(skb->protocol);
                        continue;
                case BPF_S_ANC_PKTTYPE:
                        A = skb->pkt_type;
                        continue;
                case BPF_S_ANC_IFINDEX:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->ifindex;
                        continue;
                case BPF_S_ANC_MARK:
                        A = skb->mark;
                        continue;
                case BPF_S_ANC_QUEUE:
                        A = skb->queue_mapping;
                        continue;
                case BPF_S_ANC_HATYPE:
                        if (!skb->dev)
                                return 0;
                        A = skb->dev->type;
                        continue;
                case BPF_S_ANC_RXHASH:
                        A = skb->rxhash;
                        continue;
                case BPF_S_ANC_CPU:
                        A = raw_smp_processor_id();
                        continue;
                case BPF_S_ANC_VLAN_TAG:
                        A = vlan_tx_tag_get(skb);
                        continue;
                case BPF_S_ANC_VLAN_TAG_PRESENT:
                        A = !!vlan_tx_tag_present(skb);
                        continue;
                case BPF_S_ANC_PAY_OFFSET:
                        A = __skb_get_poff(skb);
                        continue;
                case BPF_S_ANC_NLATTR: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        /* guard the subtraction below against underflow */
                        if (skb->len < sizeof(struct nlattr))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = nla_find((struct nlattr *)&skb->data[A],
                                       skb->len - A, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
                case BPF_S_ANC_NLATTR_NEST: {
                        struct nlattr *nla;

                        if (skb_is_nonlinear(skb))
                                return 0;
                        /* guard the subtractions below against underflow */
                        if (skb->len < sizeof(struct nlattr))
                                return 0;
                        if (A > skb->len - sizeof(struct nlattr))
                                return 0;

                        nla = (struct nlattr *)&skb->data[A];
                        /* the attribute must not extend past the skb */
                        if (nla->nla_len > skb->len - A)
                                return 0;

                        nla = nla_find_nested(nla, X);
                        if (nla)
                                A = (void *)nla - (void *)skb->data;
                        else
                                A = 0;
                        continue;
                }
#ifdef CONFIG_SECCOMP_FILTER
                case BPF_S_ANC_SECCOMP_LD_W:
                        A = seccomp_bpf_load(fentry->k);
                        continue;
#endif
                default:
                        WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
                                       fentry->code, fentry->jt,
                                       fentry->jf, fentry->k);
                        return 0;
                }
        }

        return 0;
}
EXPORT_SYMBOL(sk_run_filter);
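
/*
 * Worked example (illustrative only) of the dispatch loop above on a
 * filter that accepts IPv4 packets and drops everything else, using
 * the ancillary protocol load handled by BPF_S_ANC_PROTOCOL:
 *
 *	struct sock_filter prog[] = {
 *		// A <- skb->protocol (host order), via the ancillary offset
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_PROTOCOL),
 *		// if (A == ETH_P_IP) keep the packet, else drop it
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 * After sk_chk_filter() rewrites the first opcode to BPF_S_ANC_PROTOCOL,
 * each iteration advances fentry and the JEQ adds jt or jf to it, so
 * the loop never needs a bounds check.
 */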

/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);
        masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;
        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_S_ST:
                case BPF_S_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_S_JMP_JA:
                        /* a jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* a jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
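
/*
 * Example (illustrative) of a filter check_load_and_stores() rejects:
 * it reads scratch cell 0 before any ST/STX has written it, so the
 * LD_MEM would otherwise leak uninitialized kernel stack.
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),	// mem[0] never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *
 * Inserting BPF_STMT(BPF_ST, 0) in front makes the program pass, since
 * the ST marks cell 0 valid before the load.
 */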

/**
 * sk_chk_filter - verify socket filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
        /*
         * Valid instructions are initialized to non-0.
         * Invalid instructions are initialized to 0.
         */
        static const u8 codes[] = {
                [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
                [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
                [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
                [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
                [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
                [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
                [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
                [BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
                [BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
                [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
                [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
                [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
                [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
                [BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
                [BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
                [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
                [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
                [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
                [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
                [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
                [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
                [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
                [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
                [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
                [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
                [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
                [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
                [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
                [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
                [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
                [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
                [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
                [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
                [BPF_RET|BPF_K]          = BPF_S_RET_K,
                [BPF_RET|BPF_A]          = BPF_S_RET_A,
                [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
                [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
                [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
                [BPF_ST]                 = BPF_S_ST,
                [BPF_STX]                = BPF_S_STX,
                [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
                [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
                [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
                [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
                [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
                [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
                [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
                [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
                [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
        };
        int pc;
        bool anc_found;

        if (flen == 0 || flen > BPF_MAXINSNS)
                return -EINVAL;

        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                struct sock_filter *ftest = &filter[pc];
                u16 code = ftest->code;

                if (code >= ARRAY_SIZE(codes))
                        return -EINVAL;
                code = codes[code];
                if (!code)
                        return -EINVAL;
                /* Some instructions need special checks */
                switch (code) {
                case BPF_S_ALU_DIV_K:
                case BPF_S_ALU_MOD_K:
                        /* check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_S_LD_MEM:
                case BPF_S_LDX_MEM:
                case BPF_S_ST:
                case BPF_S_STX:
                        /* check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JA:
                        /*
                         * Note, a large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_X:
                case BPF_S_JMP_JSET_K:
                        /* for conditionals both must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_S_LD_W_ABS:
                case BPF_S_LD_H_ABS:
                case BPF_S_LD_B_ABS:
                        anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:        \
                                code = BPF_S_ANC_##CODE;        \
                                anc_found = true;               \
                                break
                        switch (ftest->k) {
                        ANCILLARY(PROTOCOL);
                        ANCILLARY(PKTTYPE);
                        ANCILLARY(IFINDEX);
                        ANCILLARY(NLATTR);
                        ANCILLARY(NLATTR_NEST);
                        ANCILLARY(MARK);
                        ANCILLARY(QUEUE);
                        ANCILLARY(HATYPE);
                        ANCILLARY(RXHASH);
                        ANCILLARY(CPU);
                        ANCILLARY(ALU_XOR_X);
                        ANCILLARY(VLAN_TAG);
                        ANCILLARY(VLAN_TAG_PRESENT);
                        ANCILLARY(PAY_OFFSET);
                        }

                        /* ancillary operation unknown or unsupported */
                        if (!anc_found && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
                ftest->code = code;
        }

        /* last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_S_RET_K:
        case BPF_S_RET_A:
                return check_load_and_stores(filter, flen);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
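
/*
 * What the ANCILLARY() rewrite above does, on one instruction
 * (illustrative): a userspace absolute load at a magic offset
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * leaves sk_chk_filter() with ftest->code == BPF_S_ANC_IFINDEX, so
 * sk_run_filter() fetches skb->dev->ifindex instead of packet bytes.
 */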

/**
 * sk_filter_release_rcu - Release a socket filter by rcu_head
 * @rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        bpf_jit_free(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

static int __sk_prepare_filter(struct sk_filter *fp)
{
        int err;

        fp->bpf_func = sk_run_filter;

        err = sk_chk_filter(fp->insns, fp->len);
        if (err)
                return err;

        bpf_jit_compile(fp);
        return 0;
}

/**
 * sk_unattached_filter_create - create an unattached filter
 * @fprog: the filter program
 * @pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
                                struct sock_fprog *fprog)
{
        struct sk_filter *fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        int err;

        /* Make sure the new filter is actually there. */
        if (fprog->filter == NULL)
                return -EINVAL;

        fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        memcpy(fp->insns, fprog->filter, fsize);

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = __sk_prepare_filter(fp);
        if (err)
                goto free_mem;

        *pfp = fp;
        return 0;
free_mem:
        kfree(fp);
        return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);

void sk_unattached_filter_destroy(struct sk_filter *fp)
{
        sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
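
/*
 * Hedged sketch of in-kernel usage, loosely modeled on how drivers
 * consume this API; the names and the skb here are illustrative, not
 * taken from this file:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	// accept all
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,	// kernel pointer: memcpy, not copy_from_user
 *	};
 *	struct sk_filter *fp;
 *	int err = sk_unattached_filter_create(&fp, &fprog);
 *
 *	if (!err) {
 *		unsigned int keep = SK_RUN_FILTER(fp, skb);
 *		sk_unattached_filter_destroy(fp);
 *	}
 */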

/**
 * sk_attach_filter - attach a socket filter
 * @fprog: the filter program
 * @sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
        unsigned int sk_fsize = sk_filter_size(fprog->len);
        int err;

        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;

        /* Make sure the new filter is actually there. */
        if (fprog->filter == NULL)
                return -EINVAL;

        fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
                sock_kfree_s(sk, fp, sk_fsize);
                return -EFAULT;
        }

        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;

        err = __sk_prepare_filter(fp);
        if (err) {
                sk_filter_uncharge(sk, fp);
                return err;
        }

        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);

        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
        return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
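
/*
 * Userspace view of this entry point (a sketch; headers and error
 * handling omitted): the same sock_fprog arrives via setsockopt() and
 * is detached again with SO_DETACH_FILTER, which lands in
 * sk_detach_filter() below.
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },	// accept all
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,
 *	};
 *	int dummy = 0;
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		   &fprog, sizeof(fprog));
 *	...
 *	// optval is unused on detach, but must still be a readable int
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		   &dummy, sizeof(dummy));
 */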

int sk_detach_filter(struct sock *sk)
{
        int ret = -ENOENT;
        struct sk_filter *filter;

        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;

        filter = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
        static const u16 decodes[] = {
                [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
                [BPF_S_ALU_ADD_X]       = BPF_ALU|BPF_ADD|BPF_X,
                [BPF_S_ALU_SUB_K]       = BPF_ALU|BPF_SUB|BPF_K,
                [BPF_S_ALU_SUB_X]       = BPF_ALU|BPF_SUB|BPF_X,
                [BPF_S_ALU_MUL_K]       = BPF_ALU|BPF_MUL|BPF_K,
                [BPF_S_ALU_MUL_X]       = BPF_ALU|BPF_MUL|BPF_X,
                [BPF_S_ALU_DIV_X]       = BPF_ALU|BPF_DIV|BPF_X,
                [BPF_S_ALU_MOD_K]       = BPF_ALU|BPF_MOD|BPF_K,
                [BPF_S_ALU_MOD_X]       = BPF_ALU|BPF_MOD|BPF_X,
                [BPF_S_ALU_AND_K]       = BPF_ALU|BPF_AND|BPF_K,
                [BPF_S_ALU_AND_X]       = BPF_ALU|BPF_AND|BPF_X,
                [BPF_S_ALU_OR_K]        = BPF_ALU|BPF_OR|BPF_K,
                [BPF_S_ALU_OR_X]        = BPF_ALU|BPF_OR|BPF_X,
                [BPF_S_ALU_XOR_K]       = BPF_ALU|BPF_XOR|BPF_K,
                [BPF_S_ALU_XOR_X]       = BPF_ALU|BPF_XOR|BPF_X,
                [BPF_S_ALU_LSH_K]       = BPF_ALU|BPF_LSH|BPF_K,
                [BPF_S_ALU_LSH_X]       = BPF_ALU|BPF_LSH|BPF_X,
                [BPF_S_ALU_RSH_K]       = BPF_ALU|BPF_RSH|BPF_K,
                [BPF_S_ALU_RSH_X]       = BPF_ALU|BPF_RSH|BPF_X,
                [BPF_S_ALU_NEG]         = BPF_ALU|BPF_NEG,
                [BPF_S_LD_W_ABS]        = BPF_LD|BPF_W|BPF_ABS,
                [BPF_S_LD_H_ABS]        = BPF_LD|BPF_H|BPF_ABS,
                [BPF_S_LD_B_ABS]        = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PROTOCOL]    = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PKTTYPE]     = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_IFINDEX]     = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_NLATTR]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_MARK]        = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_QUEUE]       = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_HATYPE]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_RXHASH]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_CPU]         = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_ALU_XOR_X]   = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_VLAN_TAG]    = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PAY_OFFSET]  = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_LD_W_LEN]        = BPF_LD|BPF_W|BPF_LEN,
                [BPF_S_LD_W_IND]        = BPF_LD|BPF_W|BPF_IND,
                [BPF_S_LD_H_IND]        = BPF_LD|BPF_H|BPF_IND,
                [BPF_S_LD_B_IND]        = BPF_LD|BPF_B|BPF_IND,
                [BPF_S_LD_IMM]          = BPF_LD|BPF_IMM,
                [BPF_S_LDX_W_LEN]       = BPF_LDX|BPF_W|BPF_LEN,
                [BPF_S_LDX_B_MSH]       = BPF_LDX|BPF_B|BPF_MSH,
                [BPF_S_LDX_IMM]         = BPF_LDX|BPF_IMM,
                [BPF_S_MISC_TAX]        = BPF_MISC|BPF_TAX,
                [BPF_S_MISC_TXA]        = BPF_MISC|BPF_TXA,
                [BPF_S_RET_K]           = BPF_RET|BPF_K,
                [BPF_S_RET_A]           = BPF_RET|BPF_A,
                [BPF_S_ALU_DIV_K]       = BPF_ALU|BPF_DIV|BPF_K,
                [BPF_S_LD_MEM]          = BPF_LD|BPF_MEM,
                [BPF_S_LDX_MEM]         = BPF_LDX|BPF_MEM,
                [BPF_S_ST]              = BPF_ST,
                [BPF_S_STX]             = BPF_STX,
                [BPF_S_JMP_JA]          = BPF_JMP|BPF_JA,
                [BPF_S_JMP_JEQ_K]       = BPF_JMP|BPF_JEQ|BPF_K,
                [BPF_S_JMP_JEQ_X]       = BPF_JMP|BPF_JEQ|BPF_X,
                [BPF_S_JMP_JGE_K]       = BPF_JMP|BPF_JGE|BPF_K,
                [BPF_S_JMP_JGE_X]       = BPF_JMP|BPF_JGE|BPF_X,
                [BPF_S_JMP_JGT_K]       = BPF_JMP|BPF_JGT|BPF_K,
                [BPF_S_JMP_JGT_X]       = BPF_JMP|BPF_JGT|BPF_X,
                [BPF_S_JMP_JSET_K]      = BPF_JMP|BPF_JSET|BPF_K,
                [BPF_S_JMP_JSET_X]      = BPF_JMP|BPF_JSET|BPF_X,
        };
        u16 code;

        code = filt->code;

        to->code = decodes[code];
        to->jt = filt->jt;
        to->jf = filt->jf;
        to->k = filt->k;
}

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
{
        struct sk_filter *filter;
        int i, ret;

        lock_sock(sk);
        filter = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        ret = 0;
        if (!filter)
                goto out;
        ret = filter->len;
        if (!len)
                goto out;
        ret = -EINVAL;
        if (len < filter->len)
                goto out;

        ret = -EFAULT;
        for (i = 0; i < filter->len; i++) {
                struct sock_filter fb;

                sk_decode_filter(&filter->insns[i], &fb);
                if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
                        goto out;
        }

        ret = filter->len;
out:
        release_sock(sk);
        return ret;
}
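
/*
 * Calling contract for sk_get_filter() (a sketch; the user buffer
 * setup is elided): a first call with len == 0 just reports the
 * instruction count, a second call with a big-enough buffer copies
 * the decoded program out and again returns the count.
 *
 *	int n = sk_get_filter(sk, NULL, 0);	// 0 if no filter attached
 *
 *	if (n > 0) {
 *		// ubuf must hold at least n struct sock_filter entries
 *		n = sk_get_filter(sk, ubuf, n);	// -EINVAL if len < n
 *	}
 */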