]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bridge/netfilter/ebtables.c
Merge branch 'master' into for-linus
[mirror_ubuntu-zesty-kernel.git] / net / bridge / netfilter / ebtables.c
1 /*
2 * ebtables
3 *
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
6 *
7 * ebtables.c,v 2.0, July, 2002
8 *
 * This code is strongly inspired on the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18
19 #include <linux/kmod.h>
20 #include <linux/module.h>
21 #include <linux/vmalloc.h>
22 #include <linux/netfilter/x_tables.h>
23 #include <linux/netfilter_bridge/ebtables.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
32
/* Loud diagnostic for malformed table data handed in by userspace;
 * switch to the empty definition below to compile the messages out. */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
                                         "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
36
/*
 * Each cpu has its own set of counters, so there is no need for write_lock in
 * the softirq
 * For reading or updating the counters, the user context needs to
 * get a write_lock
 */

/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one cpu's counter block for n entries (cache aligned) */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* start of @cpu's counter block inside the shared allocation @c */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))
49
50
51
52 static DEFINE_MUTEX(ebt_mutex);
53
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
56 {
57 int v = *(compat_int_t *)src;
58
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
62 }
63
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65 {
66 compat_int_t cv = *(int *)src;
67
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71 }
72 #endif
73
74
/* The built-in "standard" target: its targinfo is just an int verdict
 * (ACCEPT/DROP/CONTINUE/RETURN or a chain jump offset). */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
86
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_target_param *par)
90 {
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
95 return 0;
96 }
97
98 static inline int ebt_do_match (struct ebt_entry_match *m,
99 const struct sk_buff *skb, struct xt_match_param *par)
100 {
101 par->match = m->u.match;
102 par->matchinfo = m->data;
103 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
104 }
105
106 static inline int
107 ebt_dev_check(const char *entry, const struct net_device *device)
108 {
109 int i = 0;
110 const char *devname;
111
112 if (*entry == '\0')
113 return 0;
114 if (!device)
115 return 1;
116 devname = device->name;
117 /* 1 is the wildcard token */
118 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
119 i++;
120 return (devname[i] != entry[i] && entry[i] != 1);
121 }
122
/* Invert the result of a test when the entry's corresponding EBT_I* flag
 * is set (the rule was written with '!'); relies on a local 'e'. */
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches; returns 1 when the frame fails the entry's
 * built-in matches (caller then skips the entry), 0 when it passes */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
   const struct net_device *in, const struct net_device *out)
{
	int verdict, i;

	/* protocol: EBT_802_3 selects frames carrying a length field
	   (< 1536) instead of an ethertype; otherwise compare ethertypes
	   unless the rule doesn't care (EBT_NOPROTO) */
	if (e->bitmask & EBT_802_3) {
		if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
		return 1;

	/* physical in/out device names */
	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* logical (bridge) device names; only if the port is bridged */
	if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
	   e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
		return 1;
	if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
	   e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
		return 1;

	/* masked MAC comparisons: verdict collects any mismatching bits */
	if (e->bitmask & EBT_SOURCEMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
167
168 static inline __pure
169 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
170 {
171 return (void *)entry + entry->next_offset;
172 }
173
/* Do some firewalling: traverse @table for hook @hook and return the
 * netfilter verdict (NF_ACCEPT/NF_DROP) for @skb.  Runs in softirq
 * context under the table's read lock.  Jumps to user defined chains are
 * handled iteratively via the per-cpu chainstack, so there is no
 * recursion; per-entry counters are updated on this cpu's counter block. */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;	/* sp = chainstack depth */
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	bool hotdrop = false;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* shared per-call context handed to match/watcher/target hooks */
	mtpar.family  = tgpar.family = NFPROTO_BRIDGE;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.hotdrop = &hotdrop;
	mtpar.hooknum = tgpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's block inside the per-cpu counter allocation */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		/* built-in matches (proto, devices, MACs) first */
		if (ebt_basic_match(point, eth_hdr(skb), in, out))
			goto letscontinue;

		/* then the extension matches; nonzero means no match */
		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &mtpar) != 0)
			goto letscontinue;
		if (hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &tgpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			tgpar.target   = t->u.target;
			tgpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &tgpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			/* pop the chainstack: resume the calling chain */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: verdict >= 0 is an offset from base;
		   push the return position first */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* I actually like this :) */
	/* chain exhausted: apply the chain policy */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
311
312 /* If it succeeds, returns element and locks mutex */
313 static inline void *
314 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
315 struct mutex *mutex)
316 {
317 struct {
318 struct list_head list;
319 char name[EBT_FUNCTION_MAXNAMELEN];
320 } *e;
321
322 *error = mutex_lock_interruptible(mutex);
323 if (*error != 0)
324 return NULL;
325
326 list_for_each_entry(e, head, list) {
327 if (strcmp(e->name, name) == 0)
328 return e;
329 }
330 *error = -ENOENT;
331 mutex_unlock(mutex);
332 return NULL;
333 }
334
/* Like find_inlist_lock_noload(), but on a miss tries to load the module
 * "<prefix><name>" via request_module() and looks up the name once more. */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
343
/* Find the named ebtables table in @net, auto-loading "ebtable_<name>"
 * if needed; on success the returned table is protected by *mutex. */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
351
/* Validate one match extension of an entry: bounds-check it against the
 * entry's match section, resolve (and possibly modprobe) the extension by
 * name, then run its checkentry hook via xt_check_match().  On success a
 * module reference is held and *cnt is bumped (used to limit cleanup if a
 * later match fails). */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* room between this match and the start of the watchers */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	/* two-step comparison avoids unsigned wraparound */
	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
		m->u.name, 0), "ebt_%s", m->u.name);
	if (IS_ERR(match))
		return PTR_ERR(match);
	if (match == NULL)
		return -ENOENT;
	/* u is a union: replace the name by the resolved pointer */
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
385
/* Validate one watcher extension of an entry: bounds-check it against the
 * entry's watcher section, resolve (and possibly modprobe) the target by
 * name, then run its checkentry hook via xt_check_target().  On success a
 * module reference is held and *cnt is bumped. */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* room between this watcher and the entry's target */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	/* two-step comparison avoids unsigned wraparound */
	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = try_then_request_module(
		  xt_find_target(NFPROTO_BRIDGE, w->u.name, 0),
		  "ebt_%s", w->u.name);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	if (watcher == NULL)
		return -ENOENT;
	/* u is a union: replace the name by the resolved pointer */
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
420
/* First structural pass over the entries blob copied from userspace: walk
 * it by the declared offsets, record where each base chain header starts
 * (newinfo->hook_entry[]) and verify the chain headers and rules exactly
 * fill entries_size.  repl->hook_entry[] contains the *user's* pointers
 * and is only compared against repl->entries + offset, never
 * dereferenced.  Returns 0 or -EINVAL. */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* does userspace claim a base chain starts here? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		/* beginning of a chain (bitmask == 0 distinguishes a
		   struct ebt_entries header from a rule) */
		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* a rule: next_offset must stay inside the blob
			   and cover at least the fixed header */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	/* any early break above leaves offset != limit */
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
488
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 *
 * Called for every blob element.  For a chain header it validates the
 * policy and counter_offset and resets the per-chain counters *n/*cnt;
 * for a rule it validates the internal offset ordering.  *totalcnt counts
 * all rules, *udc_cnt counts user defined chains.
 */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	/* is this one of the base chain starts recorded earlier? */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		/* each chain's counters start right after the previous
		   chain's, so the running total must match */
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
550
/* Per-udc bookkeeping used while checking for chain loops */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;	/* chain header + resume position */
	int from;		/* chain nr we jumped here from, -1 = base */
	unsigned int hookmask;	/* base chains this udc is reachable from */
};
557
558 /*
559 * we need these positions to check that the jumps to a different part of the
560 * entries is a jump to the beginning of a new chain.
561 */
562 static inline int
563 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
564 unsigned int *n, struct ebt_cl_stack *udc)
565 {
566 int i;
567
568 /* we're only interested in chain starts */
569 if (e->bitmask)
570 return 0;
571 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
572 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
573 break;
574 }
575 /* only care about udc */
576 if (i != NF_BR_NUMHOOKS)
577 return 0;
578
579 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
580 /* these initialisations are depended on later in check_chainloops() */
581 udc[*n].cs.n = 0;
582 udc[*n].hookmask = 0;
583
584 (*n)++;
585 return 0;
586 }
587
588 static inline int
589 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
590 {
591 struct xt_mtdtor_param par;
592
593 if (i && (*i)-- == 0)
594 return 1;
595
596 par.net = net;
597 par.match = m->u.match;
598 par.matchinfo = m->data;
599 par.family = NFPROTO_BRIDGE;
600 if (par.match->destroy != NULL)
601 par.match->destroy(&par);
602 module_put(par.match->me);
603 return 0;
604 }
605
606 static inline int
607 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
608 {
609 struct xt_tgdtor_param par;
610
611 if (i && (*i)-- == 0)
612 return 1;
613
614 par.net = net;
615 par.target = w->u.watcher;
616 par.targinfo = w->data;
617 par.family = NFPROTO_BRIDGE;
618 if (par.target->destroy != NULL)
619 par.target->destroy(&par);
620 module_put(par.target->me);
621 return 0;
622 }
623
/* Undo ebt_check_entry() for one rule: destroy its matches, watchers and
 * target and drop their module references.  Chain headers (bitmask == 0)
 * are skipped.  When @cnt is non-NULL only the first *cnt rules are
 * cleaned (partial cleanup after a failed table check). */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net = net;
	par.target = t->u.target;
	par.targinfo = t->data;
	par.family = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
648
/* Fully validate one rule: flags, hook membership, all matches, all
 * watchers and the target (resolving modules and running the extensions'
 * checkentry hooks).  On success module references are held and *cnt is
 * bumped; on failure everything checked so far for this rule is unwound.
 * @cl_s/@udc_cnt give the user defined chains so the rule's hookmask can
 * be computed for rules living in a udc. */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to?  (hook_entry[] is in address order,
	   so the last chain start below e is our base chain) */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* in a udc: inherit the hookmask of the udc we live in,
		   computed earlier by check_chainloops() */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;	/* nr of successfully checked matches, for unwinding */

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;	/* nr of successfully checked watchers */
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	gap = e->next_offset - e->target_offset;

	target = try_then_request_module(
		 xt_find_target(NFPROTO_BRIDGE, t->u.name, 0),
		 "ebt_%s", t->u.name);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	} else if (target == NULL) {
		ret = -ENOENT;
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		/* a standard target carries an int verdict; it must fit
		   and jump verdicts must be in range */
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
764
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Implemented as an iterative depth-first walk starting at @chain, using
 * cl_s[].cs as the recursion stack (cs.n doubles as the "currently on the
 * stack" marker for loop detection).  Returns 0, or -1 on a loop or a
 * jump that does not land on a recorded udc header.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		/* only standard targets can jump to another chain */
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* cs.n != 0 means the chain is on our stack */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already visited from this base chain: skip */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
839
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh
 *
 * Full semantic validation of a table already passed through
 * ebt_verify_pointers(): chain ordering, per-chain entry counts, loop
 * detection, per-entry extension checks.  Also allocates the per-cpu
 * chainstack when user defined chains exist (freed by the caller on
 * error).  Returns 0 or a negative errno. */
static int translate_table(struct net *net, const char *name,
   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	/* the first valid hook's chain must sit at the very start */
	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				/* free the per-cpu stacks allocated so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* unwind only the i entries that were fully checked */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
959
960 /* called under write_lock */
961 static void get_counters(const struct ebt_counter *oldcounters,
962 struct ebt_counter *counters, unsigned int nentries)
963 {
964 int i, cpu;
965 struct ebt_counter *counter_base;
966
967 /* counters of cpu 0 */
968 memcpy(counters, oldcounters,
969 sizeof(struct ebt_counter) * nentries);
970
971 /* add other counters to those of cpu 0 */
972 for_each_possible_cpu(cpu) {
973 if (cpu == 0)
974 continue;
975 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
976 for (i = 0; i < nentries; i++) {
977 counters[i].pcnt += counter_base[i].pcnt;
978 counters[i].bcnt += counter_base[i].bcnt;
979 }
980 }
981 }
982
/* Second half of table replacement: verify and translate @newinfo, swap
 * it into the live table under the write lock, copy a snapshot of the old
 * counters back to userspace when requested, and tear down the old table.
 * On failure @newinfo's checked entries are unwound here, but the entries
 * blob and newinfo itself are freed by the caller. */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* on success ebt_mutex is held from here on */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		ret = -EFAULT;
	}
	else
		ret = 0;

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
	   ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1086
/* replace the table
 *
 * Entry point for the SET_ENTRIES sockopt: copy the ebt_replace header
 * and the entries blob from userspace with basic size/overflow checks,
 * then hand off to do_replace_finish() for validation and the swap. */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* the sockopt length must exactly cover header + entries blob */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* newinfo carries the per-cpu counter blocks inline at its tail */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success newinfo is now owned by the live table */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1143
1144 struct ebt_table *
1145 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1146 {
1147 struct ebt_table_info *newinfo;
1148 struct ebt_table *t, *table;
1149 struct ebt_replace_kernel *repl;
1150 int ret, i, countersize;
1151 void *p;
1152
1153 if (input_table == NULL || (repl = input_table->table) == NULL ||
1154 repl->entries == 0 || repl->entries_size == 0 ||
1155 repl->counters != NULL || input_table->private != NULL) {
1156 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1157 return ERR_PTR(-EINVAL);
1158 }
1159
1160 /* Don't add one table to multiple lists. */
1161 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1162 if (!table) {
1163 ret = -ENOMEM;
1164 goto out;
1165 }
1166
1167 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1168 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1169 ret = -ENOMEM;
1170 if (!newinfo)
1171 goto free_table;
1172
1173 p = vmalloc(repl->entries_size);
1174 if (!p)
1175 goto free_newinfo;
1176
1177 memcpy(p, repl->entries, repl->entries_size);
1178 newinfo->entries = p;
1179
1180 newinfo->entries_size = repl->entries_size;
1181 newinfo->nentries = repl->nentries;
1182
1183 if (countersize)
1184 memset(newinfo->counters, 0, countersize);
1185
1186 /* fill in newinfo and parse the entries */
1187 newinfo->chainstack = NULL;
1188 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1189 if ((repl->valid_hooks & (1 << i)) == 0)
1190 newinfo->hook_entry[i] = NULL;
1191 else
1192 newinfo->hook_entry[i] = p +
1193 ((char *)repl->hook_entry[i] - repl->entries);
1194 }
1195 ret = translate_table(net, repl->name, newinfo);
1196 if (ret != 0) {
1197 BUGPRINT("Translate_table failed\n");
1198 goto free_chainstack;
1199 }
1200
1201 if (table->check && table->check(newinfo, table->valid_hooks)) {
1202 BUGPRINT("The table doesn't like its own initial data, lol\n");
1203 return ERR_PTR(-EINVAL);
1204 }
1205
1206 table->private = newinfo;
1207 rwlock_init(&table->lock);
1208 ret = mutex_lock_interruptible(&ebt_mutex);
1209 if (ret != 0)
1210 goto free_chainstack;
1211
1212 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1213 if (strcmp(t->name, table->name) == 0) {
1214 ret = -EEXIST;
1215 BUGPRINT("Table name already exists\n");
1216 goto free_unlock;
1217 }
1218 }
1219
1220 /* Hold a reference count if the chains aren't empty */
1221 if (newinfo->nentries && !try_module_get(table->me)) {
1222 ret = -ENOENT;
1223 goto free_unlock;
1224 }
1225 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1226 mutex_unlock(&ebt_mutex);
1227 return table;
1228 free_unlock:
1229 mutex_unlock(&ebt_mutex);
1230 free_chainstack:
1231 if (newinfo->chainstack) {
1232 for_each_possible_cpu(i)
1233 vfree(newinfo->chainstack[i]);
1234 vfree(newinfo->chainstack);
1235 }
1236 vfree(newinfo->entries);
1237 free_newinfo:
1238 vfree(newinfo);
1239 free_table:
1240 kfree(table);
1241 out:
1242 return ERR_PTR(ret);
1243 }
1244
1245 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1246 {
1247 int i;
1248
1249 if (!table) {
1250 BUGPRINT("Request to unregister NULL table!!!\n");
1251 return;
1252 }
1253 mutex_lock(&ebt_mutex);
1254 list_del(&table->list);
1255 mutex_unlock(&ebt_mutex);
1256 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1257 ebt_cleanup_entry, net, NULL);
1258 if (table->private->nentries)
1259 module_put(table->me);
1260 vfree(table->private->entries);
1261 if (table->private->chainstack) {
1262 for_each_possible_cpu(i)
1263 vfree(table->private->chainstack[i]);
1264 vfree(table->private->chainstack);
1265 }
1266 vfree(table->private);
1267 kfree(table);
1268 }
1269
1270 /* userspace just supplied us with counters */
1271 static int do_update_counters(struct net *net, const char *name,
1272 struct ebt_counter __user *counters,
1273 unsigned int num_counters,
1274 const void __user *user, unsigned int len)
1275 {
1276 int i, ret;
1277 struct ebt_counter *tmp;
1278 struct ebt_table *t;
1279
1280 if (num_counters == 0)
1281 return -EINVAL;
1282
1283 tmp = vmalloc(num_counters * sizeof(*tmp));
1284 if (!tmp)
1285 return -ENOMEM;
1286
1287 t = find_table_lock(net, name, &ret, &ebt_mutex);
1288 if (!t)
1289 goto free_tmp;
1290
1291 if (num_counters != t->private->nentries) {
1292 BUGPRINT("Wrong nr of counters\n");
1293 ret = -EINVAL;
1294 goto unlock_mutex;
1295 }
1296
1297 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1298 ret = -EFAULT;
1299 goto unlock_mutex;
1300 }
1301
1302 /* we want an atomic add of the counters */
1303 write_lock_bh(&t->lock);
1304
1305 /* we add to the counters of the first cpu */
1306 for (i = 0; i < num_counters; i++) {
1307 t->private->counters[i].pcnt += tmp[i].pcnt;
1308 t->private->counters[i].bcnt += tmp[i].bcnt;
1309 }
1310
1311 write_unlock_bh(&t->lock);
1312 ret = 0;
1313 unlock_mutex:
1314 mutex_unlock(&ebt_mutex);
1315 free_tmp:
1316 vfree(tmp);
1317 return ret;
1318 }
1319
1320 static int update_counters(struct net *net, const void __user *user,
1321 unsigned int len)
1322 {
1323 struct ebt_replace hlp;
1324
1325 if (copy_from_user(&hlp, user, sizeof(hlp)))
1326 return -EFAULT;
1327
1328 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1329 return -EINVAL;
1330
1331 return do_update_counters(net, hlp.name, hlp.counters,
1332 hlp.num_counters, user, len);
1333 }
1334
1335 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1336 const char *base, char __user *ubase)
1337 {
1338 char __user *hlp = ubase + ((char *)m - base);
1339 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1340 return -EFAULT;
1341 return 0;
1342 }
1343
1344 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1345 const char *base, char __user *ubase)
1346 {
1347 char __user *hlp = ubase + ((char *)w - base);
1348 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1349 return -EFAULT;
1350 return 0;
1351 }
1352
1353 static inline int
1354 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1355 {
1356 int ret;
1357 char __user *hlp;
1358 const struct ebt_entry_target *t;
1359
1360 if (e->bitmask == 0)
1361 return 0;
1362
1363 hlp = ubase + (((char *)e + e->target_offset) - base);
1364 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1365
1366 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1367 if (ret != 0)
1368 return ret;
1369 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1370 if (ret != 0)
1371 return ret;
1372 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1373 return -EFAULT;
1374 return 0;
1375 }
1376
1377 static int copy_counters_to_user(struct ebt_table *t,
1378 const struct ebt_counter *oldcounters,
1379 void __user *user, unsigned int num_counters,
1380 unsigned int nentries)
1381 {
1382 struct ebt_counter *counterstmp;
1383 int ret = 0;
1384
1385 /* userspace might not need the counters */
1386 if (num_counters == 0)
1387 return 0;
1388
1389 if (num_counters != nentries) {
1390 BUGPRINT("Num_counters wrong\n");
1391 return -EINVAL;
1392 }
1393
1394 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1395 if (!counterstmp)
1396 return -ENOMEM;
1397
1398 write_lock_bh(&t->lock);
1399 get_counters(oldcounters, counterstmp, nentries);
1400 write_unlock_bh(&t->lock);
1401
1402 if (copy_to_user(user, counterstmp,
1403 nentries * sizeof(struct ebt_counter)))
1404 ret = -EFAULT;
1405 vfree(counterstmp);
1406 return ret;
1407 }
1408
/* called with ebt_mutex locked */
/* Copy the whole table (entries plus, optionally, a counter snapshot)
 * back to userspace. EBT_SO_GET_ENTRIES returns the live table
 * (t->private); EBT_SO_GET_INIT_ENTRIES returns the initial template
 * (t->table). Returns 0 or a negative errno.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	/* pick the live table or the initial (registration-time) one */
	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* userspace must have sized its buffer for header + entries and,
	 * if it asked for counters, one ebt_counter per entry
	 */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1461
1462 static int do_ebt_set_ctl(struct sock *sk,
1463 int cmd, void __user *user, unsigned int len)
1464 {
1465 int ret;
1466
1467 if (!capable(CAP_NET_ADMIN))
1468 return -EPERM;
1469
1470 switch(cmd) {
1471 case EBT_SO_SET_ENTRIES:
1472 ret = do_replace(sock_net(sk), user, len);
1473 break;
1474 case EBT_SO_SET_COUNTERS:
1475 ret = update_counters(sock_net(sk), user, len);
1476 break;
1477 default:
1478 ret = -EINVAL;
1479 }
1480 return ret;
1481 }
1482
/* getsockopt() entry point: dispatch EBT_SO_GET_* commands.
 * Requires CAP_NET_ADMIN. find_table_lock() returns with ebt_mutex
 * held, so every branch below must drop it exactly once.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success, ebt_mutex is held until released per-branch below */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch(cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		/* GET_INFO describes the live table, GET_INIT_INFO the
		 * registration-time template */
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1538
1539 #ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: identical up to (and excluding)
 * hook_entry; from there on, pointers shrink to compat_uptr_t.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1554
/* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit on-the-wire header of a match/watcher/target extension:
 * a name-or-pointer union, the payload size, then the payload itself.
 */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	/* variable-length extension payload follows the header */
	compat_uint_t data[0];
};
1564
1565 /* account for possible padding between match_size and ->data */
1566 static int ebt_compat_entry_padsize(void)
1567 {
1568 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1569 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1570 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1571 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1572 }
1573
1574 static int ebt_compat_match_offset(const struct xt_match *match,
1575 unsigned int userlen)
1576 {
1577 /*
1578 * ebt_among needs special handling. The kernel .matchsize is
1579 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1580 * value is expected.
1581 * Example: userspace sends 4500, ebt_among.c wants 4504.
1582 */
1583 if (unlikely(match->matchsize == -1))
1584 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1585 return xt_compat_match_offset(match);
1586 }
1587
1588 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1589 unsigned int *size)
1590 {
1591 const struct xt_match *match = m->u.match;
1592 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1593 int off = ebt_compat_match_offset(match, m->match_size);
1594 compat_uint_t msize = m->match_size - off;
1595
1596 BUG_ON(off >= m->match_size);
1597
1598 if (copy_to_user(cm->u.name, match->name,
1599 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1600 return -EFAULT;
1601
1602 if (match->compat_to_user) {
1603 if (match->compat_to_user(cm->data, m->data))
1604 return -EFAULT;
1605 } else if (copy_to_user(cm->data, m->data, msize))
1606 return -EFAULT;
1607
1608 *size -= ebt_compat_entry_padsize() + off;
1609 *dstptr = cm->data;
1610 *dstptr += msize;
1611 return 0;
1612 }
1613
1614 static int compat_target_to_user(struct ebt_entry_target *t,
1615 void __user **dstptr,
1616 unsigned int *size)
1617 {
1618 const struct xt_target *target = t->u.target;
1619 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1620 int off = xt_compat_target_offset(target);
1621 compat_uint_t tsize = t->target_size - off;
1622
1623 BUG_ON(off >= t->target_size);
1624
1625 if (copy_to_user(cm->u.name, target->name,
1626 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1627 return -EFAULT;
1628
1629 if (target->compat_to_user) {
1630 if (target->compat_to_user(cm->data, t->data))
1631 return -EFAULT;
1632 } else if (copy_to_user(cm->data, t->data, tsize))
1633 return -EFAULT;
1634
1635 *size -= ebt_compat_entry_padsize() + off;
1636 *dstptr = cm->data;
1637 *dstptr += tsize;
1638 return 0;
1639 }
1640
1641 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1642 void __user **dstptr,
1643 unsigned int *size)
1644 {
1645 return compat_target_to_user((struct ebt_entry_target *)w,
1646 dstptr, size);
1647 }
1648
/* Copy one entry (or chain header) to userspace in compat layout,
 * advancing *dstptr and shrinking *size as it goes. The entry's
 * watchers/target/next offsets are recomputed because the compat
 * extension headers are smaller than the native ones.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0 marks an ebt_entries chain header, not an entry */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* (origsize - *size) is how much the layout shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the shrunken offsets into the already-copied header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1704
1705 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1706 {
1707 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1708 *off += ebt_compat_entry_padsize();
1709 return 0;
1710 }
1711
1712 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1713 {
1714 *off += xt_compat_target_offset(w->u.watcher);
1715 *off += ebt_compat_entry_padsize();
1716 return 0;
1717 }
1718
/* Compute, for one entry, the total native-vs-compat size delta of all
 * its extensions, record it in the xt compat offset table, shrink
 * newinfo->entries_size accordingly, and adjust any hook entry points
 * that lie beyond this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers (bitmask == 0) have identical layouts */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): "base - hookptr" looks inverted (hookptr
		 * points into the blob that starts at base, so the intent
		 * is presumably hookptr - base); verify against upstream
		 * before relying on this hook-adjustment path
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				 newinfo->hook_entry[i] + off,
				 newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1761
1762
1763 static int compat_table_info(const struct ebt_table_info *info,
1764 struct compat_ebt_replace *newinfo)
1765 {
1766 unsigned int size = info->entries_size;
1767 const void *entries = info->entries;
1768
1769 newinfo->entries_size = size;
1770
1771 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1772 entries, newinfo);
1773 }
1774
/* Compat counterpart of copy_everything_to_user(): copy the table
 * (entries plus optional counter snapshot) back to 32-bit userspace,
 * converting every entry to the compat layout on the way out.
 * Called with ebt_mutex held.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* pick the live table or the registration-time template */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute the compat entries_size into repl */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	/* userspace buffer must hold header + compat entries (+ counters) */
	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1830
/* Bookkeeping for the two-pass compat-to-native entry translation:
 * pass one runs with buf_kern_start == NULL and only counts sizes;
 * pass two copies into the allocated kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1837
1838 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839 {
1840 state->buf_kern_offset += sz;
1841 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1842 }
1843
1844 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845 void *data, unsigned int sz)
1846 {
1847 if (state->buf_kern_start == NULL)
1848 goto count_only;
1849
1850 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1851
1852 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853
1854 count_only:
1855 state->buf_user_offset += sz;
1856 return ebt_buf_count(state, sz);
1857 }
1858
1859 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860 {
1861 char *b = state->buf_kern_start;
1862
1863 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1864
1865 if (b != NULL && sz > 0)
1866 memset(b + state->buf_kern_offset, 0, sz);
1867 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868 return ebt_buf_count(state, sz);
1869 }
1870
/* Which kind of extension compat_mtw_from_user() is translating;
 * matches, watchers and targets share the same wire format.
 */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1876
/* Translate one compat match/watcher/target into native layout.
 * During the counting pass (state->buf_kern_start == NULL) this only
 * records the per-extension size delta via xt_compat_add_offset();
 * during the copy pass it writes the converted payload into the
 * kernel buffer. Returns the native payload size (off + match_size)
 * or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	/* copy to a bounded local buffer; mwt->u.name comes from userspace
	 * and may be unterminated
	 */
	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* -1 marks a variable-size match (ebt_among); keep the
		 * user-supplied size (comparison relies on the implicit
		 * unsigned conversion of -1)
		 */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	/* counting pass: remember this extension's size delta */
	if (!dst) {
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment padding after the payload in the copy pass */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1957
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
/* Walk a run of compat extension headers of one @type and translate
 * each via compat_mtw_from_user(), returning the total size growth of
 * the native representation relative to the compat one.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* match_kern points at the native header being written,
		 * or stays NULL during the counting pass
		 */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		/* user-supplied payload size must fit in what remains */
		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		/* fix up the size field in the native header */
		if (match_kern)
			match_kern->match_size = ret;

		/* a target run must consist of exactly one extension */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2019
/* Iterate fn over every compat watcher of entry e (the region between
 * watchers_offset and target_offset); evaluates to fn's first nonzero
 * return, or -EINVAL when the watchers do not tile the region exactly.
 * NOTE(review): `e` is used unparenthesized in the init clause (unlike
 * the other uses), and `__watcher->watcher_size` is read from
 * struct compat_ebt_entry_mwt, whose only size field visible here is
 * match_size — confirm against the expansion sites before changing.
 */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__watcher;             \
	                                                    \
	for (__i = e->watchers_offset;                      \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->watcher_size +               \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__watcher = (void *)(e) + __i;              \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2041
/* Iterate fn over every compat match of entry e (the region between
 * the entry header and watchers_offset); evaluates to fn's first
 * nonzero return, or -EINVAL when the matches do not tile the region
 * exactly.
 */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__match;               \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__match = (void *)(e) + __i;                \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2063
/* called for all ebt_entry structures. */
/* Translate one compat entry (or chain header) into native layout,
 * copying/accounting through @state and shrinking *total by the amount
 * of userspace input consumed. The entry's watchers/target/next
 * offsets are rewritten to match the grown native layout.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0: a chain header, copied through unchanged */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	/* remember where the three offset fields land in the kernel
	 * buffer so they can be patched after translation
	 */
	if (state->buf_kern_start) {
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt for this region */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("ebtables: change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* how many bytes of userspace input this entry consumed */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2142
2143 /*
2144 * repl->entries_size is the size of the ebt_entry blob in userspace.
2145 * It might need more memory when copied to a 64 bit kernel in case
2146 * userspace is 32-bit. So, first task: find out how much memory is needed.
2147 *
2148 * Called before validation is performed.
2149 */
2150 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2151 struct ebt_entries_buf_state *state)
2152 {
2153 unsigned int size_remaining = size_user;
2154 int ret;
2155
2156 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2157 &size_remaining, state);
2158 if (ret < 0)
2159 return ret;
2160
2161 WARN_ON(size_remaining);
2162 return state->buf_kern_offset;
2163 }
2164
2165
2166 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2167 void __user *user, unsigned int len)
2168 {
2169 struct compat_ebt_replace tmp;
2170 int i;
2171
2172 if (len < sizeof(tmp))
2173 return -EINVAL;
2174
2175 if (copy_from_user(&tmp, user, sizeof(tmp)))
2176 return -EFAULT;
2177
2178 if (len != sizeof(tmp) + tmp.entries_size)
2179 return -EINVAL;
2180
2181 if (tmp.entries_size == 0)
2182 return -EINVAL;
2183
2184 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2185 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2186 return -ENOMEM;
2187 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2188 return -ENOMEM;
2189
2190 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2191
2192 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2193 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2194 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2195
2196 repl->num_counters = tmp.num_counters;
2197 repl->counters = compat_ptr(tmp.counters);
2198 repl->entries = compat_ptr(tmp.entries);
2199 return 0;
2200 }
2201
/*
 * COMPAT setsockopt(EBT_SO_SET_ENTRIES): replace a table with a rule
 * blob laid out by 32-bit userspace.
 *
 * Two-pass scheme: the first compat_copy_entries() call (state zeroed,
 * so no destination buffer) computes the native 64-bit size of the
 * blob; the second pass writes the translated entries into a buffer of
 * exactly that size.  The hook_entry[] offsets are then shifted by the
 * per-entry deltas recorded in the xt compat state before handing off
 * to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	/* temporary buffer holding the userspace (32-bit) layout */
	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	    newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	/* serialises access to the per-family compat offset table */
	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: state.buf_kern_start is NULL, only sizes are computed */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	/* pass 2: translate into a native-layout buffer of the exact size */
	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* rebase the per-hook entry pointers onto the translated layout */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	/* on success do_replace_finish() takes ownership of newinfo */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2297
2298 static int compat_update_counters(struct net *net, void __user *user,
2299 unsigned int len)
2300 {
2301 struct compat_ebt_replace hlp;
2302
2303 if (copy_from_user(&hlp, user, sizeof(hlp)))
2304 return -EFAULT;
2305
2306 /* try real handler in case userland supplied needed padding */
2307 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2308 return update_counters(net, user, len);
2309
2310 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2311 hlp.num_counters, user, len);
2312 }
2313
2314 static int compat_do_ebt_set_ctl(struct sock *sk,
2315 int cmd, void __user *user, unsigned int len)
2316 {
2317 int ret;
2318
2319 if (!capable(CAP_NET_ADMIN))
2320 return -EPERM;
2321
2322 switch (cmd) {
2323 case EBT_SO_SET_ENTRIES:
2324 ret = compat_do_replace(sock_net(sk), user, len);
2325 break;
2326 case EBT_SO_SET_COUNTERS:
2327 ret = compat_update_counters(sock_net(sk), user, len);
2328 break;
2329 default:
2330 ret = -EINVAL;
2331 }
2332 return ret;
2333 }
2334
/*
 * COMPAT getsockopt() handler for 32-bit userland on a 64-bit kernel.
 * Serves EBT_SO_GET_{INFO,INIT_INFO,ENTRIES,INIT_ENTRIES}.
 * Requires CAP_NET_ADMIN.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	    cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
		return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success ebt_mutex is held until the unlock at 'out' below */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* fills in the compat (32-bit) entries_size */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) here, guaranteed by the check above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2408 #endif
2409
/*
 * get/setsockopt() hooks for the ebtables control interface.
 * NOTE(review): registered with .pf = PF_INET (not PF_BRIDGE) — this is
 * the long-standing userspace ABI; confirm before ever changing it.
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf = PF_INET,
	.set_optmin = EBT_BASE_CTL,
	.set_optmax = EBT_SO_SET_MAX + 1,
	.set = do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ebt_set_ctl,
#endif
	.get_optmin = EBT_BASE_CTL,
	.get_optmax = EBT_SO_GET_MAX + 1,
	.get = do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ebt_get_ctl,
#endif
	.owner = THIS_MODULE,
};
2427
2428 static int __init ebtables_init(void)
2429 {
2430 int ret;
2431
2432 ret = xt_register_target(&ebt_standard_target);
2433 if (ret < 0)
2434 return ret;
2435 ret = nf_register_sockopt(&ebt_sockopts);
2436 if (ret < 0) {
2437 xt_unregister_target(&ebt_standard_target);
2438 return ret;
2439 }
2440
2441 printk(KERN_INFO "Ebtables v2.0 registered\n");
2442 return 0;
2443 }
2444
/* Module unload: tear down in reverse order of ebtables_init(). */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
2451
/* Exported table-management API — presumably consumed by the per-table
 * modules (e.g. ebtable_filter); the callers are not visible in this file.
 */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");