]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - net/ipv6/netfilter/ip6_tables.c
netfilter: xtables: move extension arguments into compound structure (3/6)
[mirror_ubuntu-hirsute-kernel.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
36
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
40
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
46
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
52
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
76
77 Hence the start of any table is given by get_table() below. */
78
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
82 {
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
90 }
91
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	size_t i;
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw condition with the rule's inversion flag so a single
 * test covers both "match X" and "match !X". */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address, each masked before comparison. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Look for ifname matches; this should unroll nicely. */
	/* Names are XOR-compared a long at a time under the rule's mask,
	 * which is what makes a trailing '+' wildcard work. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ip6info->iniface)[i])
			& ((const unsigned long *)ip6info->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ip6info->outiface)[i])
			& ((const unsigned long *)ip6info->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain; *protoff ends up at the
		 * upper-layer header so later matches can find the ports. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Malformed header chain on a non-fragment:
			 * request a hard drop via *hotdrop. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
				protohdr,
				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
				ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
184
185 /* should be ip6 safe */
186 static bool
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
188 {
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
192 return false;
193 }
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
197 return false;
198 }
199 return true;
200 }
201
202 static unsigned int
203 ip6t_error(struct sk_buff *skb,
204 const struct net_device *in,
205 const struct net_device *out,
206 unsigned int hooknum,
207 const struct xt_target *target,
208 const void *targinfo)
209 {
210 if (net_ratelimit())
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
212
213 return NF_DROP;
214 }
215
216 /* Performance critical - called for every packet */
217 static inline bool
218 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
219 struct xt_match_param *par)
220 {
221 par->match = m->u.kernel.match;
222 par->matchinfo = m->data;
223
224 /* Stop iteration if it doesn't match */
225 if (!m->u.kernel.match->match(skb, par))
226 return true;
227 else
228 return false;
229 }
230
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	char *p = base;

	return (struct ip6t_entry *)(p + offset);
}
236
237 /* All zeroes == unconditional rule. */
238 /* Mildly perf critical (only if packet tracing is on) */
239 static inline int
240 unconditional(const struct ip6t_ip6 *ipv6)
241 {
242 unsigned int i;
243
244 for (i = 0; i < sizeof(*ipv6); i++)
245 if (((char *)ipv6)[i])
246 break;
247
248 return (i == sizeof(*ipv6));
249 }
250
251 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
252 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Builtin chain names indexed by netfilter hook number, used in
 * TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING] = "PREROUTING",
	[NF_INET_LOCAL_IN] = "INPUT",
	[NF_INET_FORWARD] = "FORWARD",
	[NF_INET_LOCAL_OUT] = "OUTPUT",
	[NF_INET_POST_ROUTING] = "POSTROUTING",
};

/* What kind of rule produced a TRACE log line. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

/* Log-line comment strings, indexed by the enum above. */
static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE] = "rule",
	[NF_IP6_TRACE_COMMENT_RETURN] = "return",
	[NF_IP6_TRACE_COMMENT_POLICY] = "policy",
};

/* Fixed logging parameters shared by every TRACE line. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
283
/* Mildly perf critical (only if packet tracing is on) */
/* IP6T_ENTRY_ITERATE callback: walk rules from the hook entry point,
 * tracking the current chain name (*chainname) and the rule number
 * within it (*rulenum), until the matched rule @e is reached.
 * Returns 1 to stop the iteration once @e is found, 0 to continue. */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		/* Unconditional standard target with negative verdict at
		 * the chain tail is the implicit return/policy rule. */
		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
315
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * (skb->nf_trace set) that hit rule @e in @private. */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	const struct ip6t_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* Walk this CPU's copy of the table. */
	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Locate @e's chain name and rule number within it. */
	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
344 #endif
345
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Core packet-filter loop: walk this CPU's copy of @table from the
 * entry point of @hook, evaluating each rule's IPv6 header tests and
 * match extensions, following jumps/GOTOs and chain returns (tracked
 * via back/comefrom), until a target yields a final verdict. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in = in;
	mtpar.out = out;

	/* Read lock: packet path only reads the rules; counters are
	 * per-CPU so concurrent readers do not clash on them. */
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
			&mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
			struct ip6t_entry_target *t;

			if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
				goto no_match;

			/* Rule matched: account bytes and one packet. */
			ADD_COUNTER(e->counters,
				    ntohs(ipv6_hdr(skb)->payload_len) +
				    sizeof(struct ipv6hdr), 1);

			t = ip6t_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ip6t_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IP6T_RETURN) {
						/* Absolute verdict encoded
						 * as -(verdict) - 1. */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ipv6.flags & IP6T_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ip6t_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ip6t_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ip6t_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IP6T_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ip6t_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				if (verdict == IP6T_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
488
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Depth-first walk from every valid hook entry point, without
 * recursion: the back-pointer chain is threaded through the (not yet
 * live) counters.pcnt fields and restored to 0 on the way out, while
 * bit NF_INET_NUMHOOKS of comefrom marks "currently on the walk stack"
 * so a revisit means a chain loop. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				/* Still on the current walk stack: loop. */
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			    && (strcmp(t->target.u.user.name,
				       IP6T_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* Jump target must lie inside the
					 * blob with room for an entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
598
599 static int
600 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
601 {
602 struct xt_mtdtor_param par;
603
604 if (i && (*i)-- == 0)
605 return 1;
606
607 par.match = m->u.kernel.match;
608 par.matchinfo = m->data;
609 if (par.match->destroy != NULL)
610 par.match->destroy(&par);
611 module_put(par.match->me);
612 return 0;
613 }
614
615 static int
616 check_entry(struct ip6t_entry *e, const char *name)
617 {
618 struct ip6t_entry_target *t;
619
620 if (!ip6_checkentry(&e->ipv6)) {
621 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
622 return -EINVAL;
623 }
624
625 if (e->target_offset + sizeof(struct ip6t_entry_target) >
626 e->next_offset)
627 return -EINVAL;
628
629 t = ip6t_get_target(e);
630 if (e->target_offset + t->u.target_size > e->next_offset)
631 return -EINVAL;
632
633 return 0;
634 }
635
636 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
637 unsigned int *i)
638 {
639 const struct ip6t_ip6 *ipv6 = par->entryinfo;
640 int ret;
641
642 par->match = m->u.kernel.match;
643 par->matchinfo = m->data;
644
645 ret = xt_check_match(par, NFPROTO_IPV6, m->u.match_size - sizeof(*m),
646 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
647 if (ret < 0) {
648 duprintf("ip_tables: check failed for `%s'.\n",
649 par.match->name);
650 return ret;
651 }
652 ++*i;
653 return 0;
654 }
655
656 static int
657 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 unsigned int *i)
659 {
660 struct xt_match *match;
661 int ret;
662
663 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
664 m->u.user.revision),
665 "ip6t_%s", m->u.user.name);
666 if (IS_ERR(match) || !match) {
667 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
668 return match ? PTR_ERR(match) : -ENOENT;
669 }
670 m->u.kernel.match = match;
671
672 ret = check_match(m, par, i);
673 if (ret)
674 goto err;
675
676 return 0;
677 err:
678 module_put(m->u.kernel.match->me);
679 return ret;
680 }
681
682 static int check_target(struct ip6t_entry *e, const char *name)
683 {
684 struct ip6t_entry_target *t;
685 struct xt_target *target;
686 int ret;
687
688 t = ip6t_get_target(e);
689 target = t->u.kernel.target;
690 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
691 name, e->comefrom, e->ipv6.proto,
692 e->ipv6.invflags & IP6T_INV_PROTO, e, t->data);
693 if (ret < 0) {
694 duprintf("ip_tables: check failed for `%s'.\n",
695 t->u.kernel.target->name);
696 return ret;
697 }
698 return 0;
699 }
700
/* Full per-entry validation: structural checks, then resolve and check
 * every match extension and finally the target extension.  On failure
 * all module references taken so far are released again.  On success
 * *i is incremented so the caller can clean up partial progress. */
static int
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts matches successfully set up, for partial cleanup. */
	j = 0;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
747
/* First-pass walk over the user-supplied blob: verify alignment and
 * minimum size of each entry, record which entries sit exactly at the
 * announced hook/underflow offsets, and zero the kernel-owned fields
 * (counters, comefrom).  *i counts entries seen. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	/* NOTE(review): only the fixed-size header is range-checked
	 * against @limit here; next_offset itself is not -- confirm the
	 * iterator/callers keep the full entry inside the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Entry must at least hold the fixed part plus a target header. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
790
791 static int
792 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
793 {
794 struct ip6t_entry_target *t;
795
796 if (i && (*i)-- == 0)
797 return 1;
798
799 /* Cleanup all matches */
800 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
801 t = ip6t_get_target(e);
802 if (t->u.kernel.target->destroy)
803 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
804 module_put(t->u.kernel.target->me);
805 return 0;
806 }
807
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Pipeline: structural pass (sizes, hook/underflow offsets), loop
 * detection via mark_source_chains(), per-entry extension checks, and
 * finally replication of the validated blob to every other CPU's copy.
 * Returns 0 on success or a negative errno; on failure all extension
 * references taken by the check pass are released. */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	/* The blob must contain exactly the announced number of rules. */
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	/* Reject rule sets containing chain loops. */
	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		/* Tear down only the i entries that passed checking. */
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
889
890 /* Gets counters. */
891 static inline int
892 add_entry_to_counter(const struct ip6t_entry *e,
893 struct xt_counters total[],
894 unsigned int *i)
895 {
896 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
897
898 (*i)++;
899 return 0;
900 }
901
902 static inline int
903 set_entry_to_counter(const struct ip6t_entry *e,
904 struct ip6t_counters total[],
905 unsigned int *i)
906 {
907 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
908
909 (*i)++;
910 return 0;
911 }
912
/* Snapshot all rule counters into @counters: seed the array with the
 * current CPU's per-cpu copy, then add every other CPU's copy on top.
 * The caller holds the table lock so entries cannot change meanwhile. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IP6T_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IP6T_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
	}
}
946
947 static struct xt_counters *alloc_counters(struct xt_table *table)
948 {
949 unsigned int countersize;
950 struct xt_counters *counters;
951 const struct xt_table_info *private = table->private;
952
953 /* We need atomic snapshot of counters: rest doesn't change
954 (other than comefrom, which userspace doesn't care
955 about). */
956 countersize = sizeof(struct xt_counters) * private->number;
957 counters = vmalloc_node(countersize, numa_node_id());
958
959 if (counters == NULL)
960 return ERR_PTR(-ENOMEM);
961
962 /* First, sum counters... */
963 write_lock_bh(&table->lock);
964 get_counters(private, counters);
965 write_unlock_bh(&table->lock);
966
967 return counters;
968 }
969
/* Copy the whole table blob plus a live counter snapshot to userspace,
 * rewriting the kernel match/target pointers inside each entry back to
 * their user-visible extension names. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the entry's counters with the summed snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Patch each match record's kernel pointer to its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target record. */
		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1043
1044 #ifdef CONFIG_COMPAT
1045 static void compat_standard_from_user(void *dst, void *src)
1046 {
1047 int v = *(compat_int_t *)src;
1048
1049 if (v > 0)
1050 v += xt_compat_calc_jump(AF_INET6, v);
1051 memcpy(dst, &v, sizeof(v));
1052 }
1053
1054 static int compat_standard_to_user(void __user *dst, void *src)
1055 {
1056 compat_int_t cv = *(int *)src;
1057
1058 if (cv > 0)
1059 cv -= xt_compat_calc_jump(AF_INET6, cv);
1060 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1061 }
1062
1063 static inline int
1064 compat_calc_match(struct ip6t_entry_match *m, int *size)
1065 {
1066 *size += xt_compat_match_offset(m->u.kernel.match);
1067 return 0;
1068 }
1069
/* Compute how much smaller one entry is in the compat layout (entry
 * header plus every match and the target), record that per-entry delta
 * with xt_compat_add_offset(), and shrink newinfo's total size and any
 * hook/underflow offsets that lie beyond this entry. */
static int compat_calc_entry(struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ip6t_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Offsets pointing past this entry shift down by the delta. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1098
/* Build a compat (32-bit) view of @info in @newinfo: same hook layout
 * and entry count but with sizes/offsets shrunk to the compat layout.
 * newinfo->entries[] is deliberately left unfilled. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
				  compat_calc_entry, info, loc_cpu_entry,
				  newinfo);
}
1115 #endif
1116
1117 static int get_info(struct net *net, void __user *user, int *len, int compat)
1118 {
1119 char name[IP6T_TABLE_MAXNAMELEN];
1120 struct xt_table *t;
1121 int ret;
1122
1123 if (*len != sizeof(struct ip6t_getinfo)) {
1124 duprintf("length %u != %zu\n", *len,
1125 sizeof(struct ip6t_getinfo));
1126 return -EINVAL;
1127 }
1128
1129 if (copy_from_user(name, user, sizeof(name)) != 0)
1130 return -EFAULT;
1131
1132 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1133 #ifdef CONFIG_COMPAT
1134 if (compat)
1135 xt_compat_lock(AF_INET6);
1136 #endif
1137 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1138 "ip6table_%s", name);
1139 if (t && !IS_ERR(t)) {
1140 struct ip6t_getinfo info;
1141 const struct xt_table_info *private = t->private;
1142
1143 #ifdef CONFIG_COMPAT
1144 if (compat) {
1145 struct xt_table_info tmp;
1146 ret = compat_table_info(private, &tmp);
1147 xt_compat_flush_offsets(AF_INET6);
1148 private = &tmp;
1149 }
1150 #endif
1151 info.valid_hooks = t->valid_hooks;
1152 memcpy(info.hook_entry, private->hook_entry,
1153 sizeof(info.hook_entry));
1154 memcpy(info.underflow, private->underflow,
1155 sizeof(info.underflow));
1156 info.num_entries = private->number;
1157 info.size = private->size;
1158 strcpy(info.name, name);
1159
1160 if (copy_to_user(user, &info, *len) != 0)
1161 ret = -EFAULT;
1162 else
1163 ret = 0;
1164
1165 xt_table_unlock(t);
1166 module_put(t->me);
1167 } else
1168 ret = t ? PTR_ERR(t) : -ENOENT;
1169 #ifdef CONFIG_COMPAT
1170 if (compat)
1171 xt_compat_unlock(AF_INET6);
1172 #endif
1173 return ret;
1174 }
1175
/* IP6T_SO_GET_ENTRIES handler (native ABI): copy the active rule blob
 * of the named table back to userspace.  *len must equal
 * sizeof(struct ip6t_get_entries) + get.size, where get.size is the
 * blob size previously reported by IP6T_SO_GET_INFO; a stale size
 * yields -EAGAIN so the caller can retry. */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced since GET_INFO. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1214
/* Swap the active ruleset of table @name for @newinfo and return the
 * old rules' counters to userspace at @counters_ptr.  On success the
 * old table memory is freed here; on failure @newinfo is left for the
 * caller to clean up.  Returns 0 or a negative errno. */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;

	ret = 0;
	/* Allocate the counter snapshot up front so the swap below
	 * cannot fail on memory once the table is committed. */
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* NOTE(review): the paired conditions below balance the module
	 * reference held while user-added rules exist — confirm against
	 * xt_replace_table()/ip6t_register_table() refcounting. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1285
1286 static int
1287 do_replace(struct net *net, void __user *user, unsigned int len)
1288 {
1289 int ret;
1290 struct ip6t_replace tmp;
1291 struct xt_table_info *newinfo;
1292 void *loc_cpu_entry;
1293
1294 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1295 return -EFAULT;
1296
1297 /* overflow check */
1298 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1299 return -ENOMEM;
1300
1301 newinfo = xt_alloc_table_info(tmp.size);
1302 if (!newinfo)
1303 return -ENOMEM;
1304
1305 /* choose the copy that is on our node/cpu */
1306 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1307 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1308 tmp.size) != 0) {
1309 ret = -EFAULT;
1310 goto free_newinfo;
1311 }
1312
1313 ret = translate_table(tmp.name, tmp.valid_hooks,
1314 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1315 tmp.hook_entry, tmp.underflow);
1316 if (ret != 0)
1317 goto free_newinfo;
1318
1319 duprintf("ip_tables: Translated table\n");
1320
1321 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1322 tmp.num_counters, tmp.counters);
1323 if (ret)
1324 goto free_newinfo_untrans;
1325 return 0;
1326
1327 free_newinfo_untrans:
1328 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1329 free_newinfo:
1330 xt_free_table_info(newinfo);
1331 return ret;
1332 }
1333
1334 /* We're lazy, and add to the first CPU; overflow works its fey magic
1335 * and everything is OK. */
1336 static inline int
1337 add_counter_to_entry(struct ip6t_entry *e,
1338 const struct xt_counters addme[],
1339 unsigned int *i)
1340 {
1341 #if 0
1342 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1343 *i,
1344 (long unsigned int)e->counters.pcnt,
1345 (long unsigned int)e->counters.bcnt,
1346 (long unsigned int)addme[*i].pcnt,
1347 (long unsigned int)addme[*i].bcnt);
1348 #endif
1349
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1351
1352 (*i)++;
1353 return 0;
1354 }
1355
1356 static int
1357 do_add_counters(struct net *net, void __user *user, unsigned int len,
1358 int compat)
1359 {
1360 unsigned int i;
1361 struct xt_counters_info tmp;
1362 struct xt_counters *paddc;
1363 unsigned int num_counters;
1364 char *name;
1365 int size;
1366 void *ptmp;
1367 struct xt_table *t;
1368 const struct xt_table_info *private;
1369 int ret = 0;
1370 const void *loc_cpu_entry;
1371 #ifdef CONFIG_COMPAT
1372 struct compat_xt_counters_info compat_tmp;
1373
1374 if (compat) {
1375 ptmp = &compat_tmp;
1376 size = sizeof(struct compat_xt_counters_info);
1377 } else
1378 #endif
1379 {
1380 ptmp = &tmp;
1381 size = sizeof(struct xt_counters_info);
1382 }
1383
1384 if (copy_from_user(ptmp, user, size) != 0)
1385 return -EFAULT;
1386
1387 #ifdef CONFIG_COMPAT
1388 if (compat) {
1389 num_counters = compat_tmp.num_counters;
1390 name = compat_tmp.name;
1391 } else
1392 #endif
1393 {
1394 num_counters = tmp.num_counters;
1395 name = tmp.name;
1396 }
1397
1398 if (len != size + num_counters * sizeof(struct xt_counters))
1399 return -EINVAL;
1400
1401 paddc = vmalloc_node(len - size, numa_node_id());
1402 if (!paddc)
1403 return -ENOMEM;
1404
1405 if (copy_from_user(paddc, user + size, len - size) != 0) {
1406 ret = -EFAULT;
1407 goto free;
1408 }
1409
1410 t = xt_find_table_lock(net, AF_INET6, name);
1411 if (!t || IS_ERR(t)) {
1412 ret = t ? PTR_ERR(t) : -ENOENT;
1413 goto free;
1414 }
1415
1416 write_lock_bh(&t->lock);
1417 private = t->private;
1418 if (private->number != num_counters) {
1419 ret = -EINVAL;
1420 goto unlock_up_free;
1421 }
1422
1423 i = 0;
1424 /* Choose the copy that is on our node */
1425 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1426 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1427 private->size,
1428 add_counter_to_entry,
1429 paddc,
1430 &i);
1431 unlock_up_free:
1432 write_unlock_bh(&t->lock);
1433 xt_table_unlock(t);
1434 module_put(t->me);
1435 free:
1436 vfree(paddc);
1437
1438 return ret;
1439 }
1440
1441 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace, as received by the
 * compat IP6T_SO_SET_REPLACE path.  Differs from the native struct in
 * the pointer width of @counters and the trailing compat entry array. */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size; /* size of trailing entries blob, in compat layout */
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters; /* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];
};
1453
/* Iterator callback: convert one native rule @e into compat layout at
 * *dstptr, substituting the *i-th aggregated counter pair, and patch
 * the entry's target/next offsets to account for the size shrinkage.
 * Advances *dstptr, decreases *size by the bytes saved, bumps *i.
 * Returns 0 or a negative errno. */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Raw header copy first; offsets are fixed up below once the
	 * matches/target have been converted and the shrinkage is known. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	/* origsize - *size = total bytes saved so far by compat conversion */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
 out:
	return ret;
}
1497
1498 static int
1499 compat_find_calc_match(struct ip6t_entry_match *m,
1500 const char *name,
1501 const struct ip6t_ip6 *ipv6,
1502 unsigned int hookmask,
1503 int *size, unsigned int *i)
1504 {
1505 struct xt_match *match;
1506
1507 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1508 m->u.user.revision),
1509 "ip6t_%s", m->u.user.name);
1510 if (IS_ERR(match) || !match) {
1511 duprintf("compat_check_calc_match: `%s' not found\n",
1512 m->u.user.name);
1513 return match ? PTR_ERR(match) : -ENOENT;
1514 }
1515 m->u.kernel.match = match;
1516 *size += xt_compat_match_offset(match);
1517
1518 (*i)++;
1519 return 0;
1520 }
1521
1522 static int
1523 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1524 {
1525 if (i && (*i)-- == 0)
1526 return 1;
1527
1528 module_put(m->u.kernel.match->me);
1529 return 0;
1530 }
1531
1532 static int
1533 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1534 {
1535 struct ip6t_entry_target *t;
1536
1537 if (i && (*i)-- == 0)
1538 return 1;
1539
1540 /* Cleanup all matches */
1541 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1542 t = compat_ip6t_get_target(e);
1543 module_put(t->u.kernel.target->me);
1544 return 0;
1545 }
1546
/* First pass over a compat ruleset: validate one compat entry's
 * alignment and bounds, resolve its matches and target (taking module
 * references), accumulate the compat->native size delta, record the
 * offset mapping for later translation, and note hook/underflow hits.
 * On success *i is bumped; on failure all references taken here are
 * dropped.  Returns 0 or a negative errno. */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much larger the native entry will be. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
					&e->ipv6, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember compat->native offset delta for this entry; used by
	 * the copy pass and jump-offset translation. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* NOTE(review): this walks a compat-layout entry with the
	 * native-layout iterator; the offsets differ between the two
	 * structs — verify this unwind path against the compat entry
	 * layout before relying on it. */
	IP6T_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
1631
/* Second pass: expand one validated compat entry @e into native layout
 * at *dstptr (inside the new table blob based at @base), fixing up its
 * target/next offsets and shifting any hook entry/underflow markers
 * that lie beyond this entry by the growth amount. */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
					dstptr, size);
	if (ret)
		return ret;
	/* origsize - *size is negative growth: offsets move outward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1670
/* Final-pass validation of one translated (now native-layout) entry:
 * run every match's and the target's checkentry hooks.  On failure the
 * first @j matches that passed are cleaned up again.  Bumps *i on
 * success so callers can unwind partially-checked tables. */
static int compat_check_entry(struct ip6t_entry *e, const char *name,
			      unsigned int *i)
{
	unsigned int j;
	int ret;
	struct xt_mtchk_param mtpar;

	j = 0;
	/* Common checkentry parameters for all matches of this rule;
	 * per-match fields are presumably filled in by check_match()
	 * — TODO confirm against its definition. */
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1697
/* Convert a full compat-layout ruleset into native layout.
 *
 * Two passes under the xt compat lock: (1) validate every compat entry
 * and compute the native size via check_compat_entry_size_and_hooks();
 * (2) allocate a native-sized table and expand each entry into it with
 * compat_copy_entry_from_user().  Then chain-loop detection, final
 * checkentry validation, and fan-out of the blob to every CPU.
 *
 * On success *pinfo/*pentry0 are replaced with the native table (the
 * old info is freed) and 0 is returned; on failure the compat entries'
 * module references are dropped and a negative errno is returned. */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	/* Pass 2: expand each compat entry into the native blob; the
	 * hook/underflow offsets in newinfo get shifted as entries grow. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		/* First i native entries are cleaned via cleanup_entry;
		 * the remaining j compat entries via compat_release_entry. */
		j -= i;
		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1818
1819 static int
1820 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1821 {
1822 int ret;
1823 struct compat_ip6t_replace tmp;
1824 struct xt_table_info *newinfo;
1825 void *loc_cpu_entry;
1826
1827 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1828 return -EFAULT;
1829
1830 /* overflow check */
1831 if (tmp.size >= INT_MAX / num_possible_cpus())
1832 return -ENOMEM;
1833 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1834 return -ENOMEM;
1835
1836 newinfo = xt_alloc_table_info(tmp.size);
1837 if (!newinfo)
1838 return -ENOMEM;
1839
1840 /* choose the copy that is on our node/cpu */
1841 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1842 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1843 tmp.size) != 0) {
1844 ret = -EFAULT;
1845 goto free_newinfo;
1846 }
1847
1848 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1849 &newinfo, &loc_cpu_entry, tmp.size,
1850 tmp.num_entries, tmp.hook_entry,
1851 tmp.underflow);
1852 if (ret != 0)
1853 goto free_newinfo;
1854
1855 duprintf("compat_do_replace: Translated table\n");
1856
1857 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1858 tmp.num_counters, compat_ptr(tmp.counters));
1859 if (ret)
1860 goto free_newinfo_untrans;
1861 return 0;
1862
1863 free_newinfo_untrans:
1864 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1865 free_newinfo:
1866 xt_free_table_info(newinfo);
1867 return ret;
1868 }
1869
1870 static int
1871 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1872 unsigned int len)
1873 {
1874 int ret;
1875
1876 if (!capable(CAP_NET_ADMIN))
1877 return -EPERM;
1878
1879 switch (cmd) {
1880 case IP6T_SO_SET_REPLACE:
1881 ret = compat_do_replace(sock_net(sk), user, len);
1882 break;
1883
1884 case IP6T_SO_SET_ADD_COUNTERS:
1885 ret = do_add_counters(sock_net(sk), user, len, 1);
1886 break;
1887
1888 default:
1889 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1890 ret = -EINVAL;
1891 }
1892
1893 return ret;
1894 }
1895
/* 32-bit layout of struct ip6t_get_entries for IP6T_SO_GET_ENTRIES:
 * table name and blob size, followed by @size bytes of compat-format
 * rule entries. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1901
/* Snapshot the table's counters, then stream every rule to @userptr in
 * compat layout via compat_copy_entry_to_user().  Caller must hold the
 * table lock (see compat_get_entries()). */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1932
/* IP6T_SO_GET_ENTRIES handler for 32-bit userspace: validates the
 * requested size against a freshly computed compat size of the table
 * (under the xt compat lock, since offset bookkeeping is global) and
 * streams the rules out in compat layout.  -EAGAIN means the table
 * changed since the caller's GET_INFO. */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		/* Drop the per-entry offset records compat_table_info()
		 * accumulated. */
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1979
1980 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1981
1982 static int
1983 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1984 {
1985 int ret;
1986
1987 if (!capable(CAP_NET_ADMIN))
1988 return -EPERM;
1989
1990 switch (cmd) {
1991 case IP6T_SO_GET_INFO:
1992 ret = get_info(sock_net(sk), user, len, 1);
1993 break;
1994 case IP6T_SO_GET_ENTRIES:
1995 ret = compat_get_entries(sock_net(sk), user, len);
1996 break;
1997 default:
1998 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1999 }
2000 return ret;
2001 }
2002 #endif
2003
2004 static int
2005 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2006 {
2007 int ret;
2008
2009 if (!capable(CAP_NET_ADMIN))
2010 return -EPERM;
2011
2012 switch (cmd) {
2013 case IP6T_SO_SET_REPLACE:
2014 ret = do_replace(sock_net(sk), user, len);
2015 break;
2016
2017 case IP6T_SO_SET_ADD_COUNTERS:
2018 ret = do_add_counters(sock_net(sk), user, len, 0);
2019 break;
2020
2021 default:
2022 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2023 ret = -EINVAL;
2024 }
2025
2026 return ret;
2027 }
2028
2029 static int
2030 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2031 {
2032 int ret;
2033
2034 if (!capable(CAP_NET_ADMIN))
2035 return -EPERM;
2036
2037 switch (cmd) {
2038 case IP6T_SO_GET_INFO:
2039 ret = get_info(sock_net(sk), user, len, 0);
2040 break;
2041
2042 case IP6T_SO_GET_ENTRIES:
2043 ret = get_entries(sock_net(sk), user, len);
2044 break;
2045
2046 case IP6T_SO_GET_REVISION_MATCH:
2047 case IP6T_SO_GET_REVISION_TARGET: {
2048 struct ip6t_get_revision rev;
2049 int target;
2050
2051 if (*len != sizeof(rev)) {
2052 ret = -EINVAL;
2053 break;
2054 }
2055 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2056 ret = -EFAULT;
2057 break;
2058 }
2059
2060 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2061 target = 1;
2062 else
2063 target = 0;
2064
2065 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2066 rev.revision,
2067 target, &ret),
2068 "ip6t_%s", rev.name);
2069 break;
2070 }
2071
2072 default:
2073 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2074 ret = -EINVAL;
2075 }
2076
2077 return ret;
2078 }
2079
/* Register @table with the xtables core, seeded with the initial
 * ruleset @repl.  The blob is copied into a fresh xt_table_info,
 * validated/translated, then handed to xt_register_table().
 * Returns the live table or an ERR_PTR. */
struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Placeholder info that xt_register_table() swaps for newinfo. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

 out_free:
	xt_free_table_info(newinfo);
 out:
	return ERR_PTR(ret);
}
2120
2121 void ip6t_unregister_table(struct xt_table *table)
2122 {
2123 struct xt_table_info *private;
2124 void *loc_cpu_entry;
2125 struct module *table_owner = table->me;
2126
2127 private = xt_unregister_table(table);
2128
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2131 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2132 if (private->number > private->initial_entries)
2133 module_put(table_owner);
2134 xt_free_table_info(private);
2135 }
2136
/* True when (@type, @code) falls inside the rule's range — i.e. @type
 * equals @test_type and @code is within [@min_code, @max_code] — XORed
 * with @invert. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = (type == test_type) &&
			(code >= min_code) && (code <= max_code);

	return in_range != invert;
}
2146
/* Match function for the builtin "icmp6" match: compares the packet's
 * ICMPv6 type/code against the rule's configured range.  Non-first
 * fragments never match; a packet too short to carry an ICMPv6 header
 * is hot-dropped. */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2174
2175 /* Called when user tries to insert an entry of this type. */
2176 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2177 {
2178 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2179
2180 /* Must specify no unknown invflags */
2181 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2182 }
2183
/* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: its "targetsize" is just the verdict int;
 * no target function — verdicts are handled inline by ip6t_do_table. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET6,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
2195
/* ERROR target: placed at the end of every chain; hitting it means the
 * ruleset is corrupt (see ip6t_error()). */
static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= AF_INET6,
};
2202
/* Socket-option registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_*
 * ranges to the dispatchers above, with compat variants for 32-bit
 * userspace on 64-bit kernels. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2219
/* Builtin "icmp6" match registration (type/code range matching). */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= AF_INET6,
};
2228
/* Per-netns setup: register the AF_INET6 xtables proc entries/state. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET6);
}
2233
/* Per-netns teardown: undo ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET6);
}
2238
/* Network-namespace lifecycle hooks for this module. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2243
/* Module init: register pernet state, the builtin standard/error
 * targets, the icmp6 match, and finally the sockopt interface.
 * Unwinds in reverse order on any failure. */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2282
/* Module exit: unregister everything in the reverse of init order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2293
2294 /*
2295 * find the offset to specified header or the protocol number of last header
2296 * if target < 0. "last header" is transport protocol header, ESP, or
2297 * "No next header".
2298 *
2299 * If target header is found, its offset is set in *offset and return protocol
2300 * number. Otherwise, return -1.
2301 *
2302 * If the first fragment doesn't contain the final protocol header or
2303 * NEXTHDR_NONE it is considered invalid.
2304 *
2305 * Note that non-1st fragment is special case that "the protocol number
2306 * of last header" is "next header" field in Fragment header. In this case,
2307 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2308 * isn't NULL.
2309 *
2310 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	/* Start just past the fixed IPv6 header and chase the
	 * extension-header chain. */
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		/* Reached the transport header (or NEXTHDR_NONE): done
		 * if the caller asked for "last header", else not found. */
		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* Mask off the M flag and reserved bits. */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* Non-first fragment: "last header" is
				 * the fragment header's next-header
				 * field; report the fragment offset. */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			/* AH length is in 32-bit words, minus 2. */
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2370
2371 EXPORT_SYMBOL(ip6t_register_table);
2372 EXPORT_SYMBOL(ip6t_unregister_table);
2373 EXPORT_SYMBOL(ip6t_do_table);
2374 EXPORT_SYMBOL(ip6t_ext_hdr);
2375 EXPORT_SYMBOL(ipv6_find_hdr);
2376
2377 module_init(ip6_tables_init);
2378 module_exit(ip6_tables_fini);