/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

#define IPV6_HDR_LEN	(sizeof(struct ipv6hdr))
#define IPV6_OPTHDR_LEN	(sizeof(struct ipv6_opt_hdr))

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

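/*
 * Concretely: the packet path in ip6t_do_table() below only takes
 * read_lock_bh(&table->lock); user context (copy_entries_to_user(),
 * do_add_counters()) takes write_lock_bh(&table->lock) when reading or
 * updating the counters.
 */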
#if 0
#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
#endif

/* Check for an extension */
int
ip6t_ext_hdr(u8 nexthdr)
{
	return ( (nexthdr == IPPROTO_HOPOPTS) ||
		 (nexthdr == IPPROTO_ROUTING) ||
		 (nexthdr == IPPROTO_FRAGMENT) ||
		 (nexthdr == IPPROTO_ESP) ||
		 (nexthdr == IPPROTO_AH) ||
		 (nexthdr == IPPROTO_NONE) ||
		 (nexthdr == IPPROTO_DSTOPTS) );
}

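/*
 * A note on the helpers used below: FWINV() XORs a comparison result with
 * the corresponding IP6T_INV_* bit, so setting an invflag simply inverts
 * that test.  When the extension header chain cannot be parsed,
 * ip6_packet_match() sets *hotdrop and ip6t_do_table() drops the packet.
 */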
/* Returns whether matches rule or not. */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	size_t i;
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

#define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ip6info->iniface)[i])
			& ((const unsigned long *)ip6info->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ip6info->outiface)[i])
			& ((const unsigned long *)ip6info->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if ((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We still need to match the '-p all' case, too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}

/* should be ip6 safe */
static inline bool
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
		return false;
	}
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ip6t_error(struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   unsigned int hooknum,
	   const struct xt_target *target,
	   const void *targinfo)
{
	if (net_ratelimit())
		printk("ip6_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}

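/*
 * Note the inverted return convention below: IP6T_MATCH_ITERATE() stops
 * on a non-zero return, so do_match() returns true when the extension
 * match does NOT match, which makes ip6t_do_table() jump to no_match.
 */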
static inline
bool do_match(struct ip6t_entry_match *m,
	      const struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      int offset,
	      unsigned int protoff,
	      bool *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, protoff, hotdrop))
		return true;
	else
		return false;
}

static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ip6t_ip6 *ipv6)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ipv6); i++)
		if (((char *)ipv6)[i])
			break;

	return (i == sizeof(*ipv6));
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
static const char *hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	struct ip6t_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
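
/*
 * The resulting log line has the form (values illustrative):
 *	TRACE: filter:INPUT:policy:2
 * i.e. table:chain:comment:rulenum, as formatted above.
 */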
#endif

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	int offset = 0;
	unsigned int protoff = 0;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */

	read_lock_bh(&table->lock);
	private = table->private;
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
				     &protoff, &offset, &hotdrop)) {
			struct ip6t_entry_target *t;

			if (IP6T_MATCH_ITERATE(e, do_match,
					       skb, in, out,
					       offset, protoff, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters,
				    ntohs(ipv6_hdr(skb)->payload_len)
				    + IPV6_HDR_LEN,
				    1);

			t = ip6t_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
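			/* Builtin verdicts are encoded negatively: the
			 * standard target stores -verdict - 1, so
			 * "verdict = -v - 1" below recovers NF_ACCEPT,
			 * NF_DROP, etc.; IP6T_RETURN instead pops the back
			 * pointer saved in comefrom. */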
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ip6t_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IP6T_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ipv6.flags & IP6T_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ip6t_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ip6t_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ip6t_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IP6T_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ip6t_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				if (verdict == IP6T_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e
			= (struct ip6t_entry *)(entry0 + pos);
		int visited = e->comefrom & (1 << hook);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			     && (strcmp(t->target.u.user.name,
					IP6T_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
 next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static inline int
cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}

static inline int
check_match(struct ip6t_entry_match *m,
	    const char *name,
	    const struct ip6t_ip6 *ipv6,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
			     name, hookmask, ipv6->proto,
			     ipv6->invflags & IP6T_INV_PROTO);
	if (ret)
		goto err;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ipv6, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static struct xt_target ip6t_standard_target;

static inline int
check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
	    unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	    e->next_offset)
		return -EINVAL;

	j = 0;
	ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ip6t_get_target(e);
	ret = -EINVAL;
	if (e->target_offset + t->u.target_size > e->next_offset)
		goto cleanup_matches;
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ipv6.proto,
			      e->ipv6.invflags & IP6T_INV_PROTO);
	if (ret)
		goto err;

	if (t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target, t->data,
					       e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}

static inline int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

static inline int
cleanup_entry(struct ip6t_entry *e, unsigned int *i)
{
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ip6t_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
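/*
 * translate_table() proceeds in stages: it walks the blob once to
 * validate sizes and record hook entry/underflow offsets, verifies that
 * every valid hook was assigned, runs mark_source_chains() to reject rule
 * loops, runs check_entry() on each rule (loading matches and targets),
 * and finally duplicates the checked blob to every other CPU.
 */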
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry, name, size, &i);

	if (ret != 0) {
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return 0;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ip6t_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ip6t_entry *e,
		     struct ip6t_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IP6T_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IP6T_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
	}
}

static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num, countersize;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc(countersize);

	if (counters == NULL)
		return -ENOMEM;

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		struct ip6t_entry_match *m;
		struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

static int
get_entries(const struct ip6t_get_entries *entries,
	    struct ip6t_get_entries __user *uptr)
{
	int ret;
	struct xt_table *t;

	t = xt_find_table_lock(AF_INET6, entries->name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (entries->size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, entries->size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table *t;
	struct xt_table_info *newinfo, *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_entry, *loc_cpu_old_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
	    SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo_counters;

	duprintf("ip_tables: Translated table\n");

	t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
				    "ip6table_%s", tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (tmp.valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 tmp.valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(tmp.counters, counters,
			 sizeof(struct xt_counters) * tmp.num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo_counters:
	vfree(counters);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ip6t_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}

static int
do_add_counters(void __user *user, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp, *paddc;
	struct xt_table_info *private;
	struct xt_table *t;
	int ret = 0;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user, len) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET6, tmp.name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc->counters,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

static int
do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len);
		break;

	default:
		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO: {
		char name[IP6T_TABLE_MAXNAMELEN];
		struct xt_table *t;

		if (*len != sizeof(struct ip6t_getinfo)) {
			duprintf("length %u != %u\n", *len,
				 sizeof(struct ip6t_getinfo));
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(name, user, sizeof(name)) != 0) {
			ret = -EFAULT;
			break;
		}
		name[IP6T_TABLE_MAXNAMELEN-1] = '\0';

		t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
					    "ip6table_%s", name);
		if (t && !IS_ERR(t)) {
			struct ip6t_getinfo info;
			struct xt_table_info *private = t->private;

			info.valid_hooks = t->valid_hooks;
			memcpy(info.hook_entry, private->hook_entry,
			       sizeof(info.hook_entry));
			memcpy(info.underflow, private->underflow,
			       sizeof(info.underflow));
			info.num_entries = private->number;
			info.size = private->size;
			memcpy(info.name, name, sizeof(info.name));

			if (copy_to_user(user, &info, *len) != 0)
				ret = -EFAULT;
			else
				ret = 0;
			xt_table_unlock(t);
			module_put(t->me);
		} else
			ret = t ? PTR_ERR(t) : -ENOENT;
	}
	break;

	case IP6T_SO_GET_ENTRIES: {
		struct ip6t_get_entries get;

		if (*len < sizeof(get)) {
			duprintf("get_entries: %u < %u\n", *len, sizeof(get));
			ret = -EINVAL;
		} else if (copy_from_user(&get, user, sizeof(get)) != 0) {
			ret = -EFAULT;
		} else if (*len != sizeof(struct ip6t_get_entries) + get.size) {
			duprintf("get_entries: %u != %u\n", *len,
				 sizeof(struct ip6t_get_entries) + get.size);
			ret = -EINVAL;
		} else
			ret = get_entries(&get, user);
		break;
	}

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

int ip6t_register_table(struct xt_table *table,
			const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}

void ip6t_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}

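/*
 * Illustration: a rule matching ICMPv6 echo requests would carry
 * type == 128 and a code range covering [code[0], code[1]]; setting
 * IP6T_ICMP_INV in invflags inverts the whole test below.
 */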
/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}

static bool
icmp6_match(const struct sk_buff *skb,
	    const struct net_device *in,
	    const struct net_device *out,
	    const struct xt_match *match,
	    const void *matchinfo,
	    int offset,
	    unsigned int protoff,
	    bool *hotdrop)
{
	struct icmp6hdr _icmp, *ic;
	const struct ip6t_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return false;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static bool
icmp6_checkentry(const char *tablename,
		 const void *entry,
		 const struct xt_match *match,
		 void *matchinfo,
		 unsigned int hook_mask)
{
	const struct ip6t_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET6,
};

static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= AF_INET6,
};

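/*
 * Userspace speaks to ip6_tables through {set,get}sockopt() on an IPv6
 * socket: IP6T_SO_SET_REPLACE and IP6T_SO_SET_ADD_COUNTERS land in
 * do_ip6t_set_ctl() above, the IP6T_SO_GET_* commands in do_ip6t_get_ctl().
 */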
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
	.owner		= THIS_MODULE,
};

static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= &icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= AF_INET6,
};

static int __init ip6_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET6);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	xt_proto_fini(AF_INET6);
err1:
	return ret;
}

static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);
	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);
	xt_proto_fini(AF_INET6);
}

/*
 * Find the offset to the specified header, or the protocol number of the
 * last header if target < 0.  "Last header" is the transport protocol
 * header, ESP, or "No next header".
 *
 * If the target header is found, its offset is set in *offset and the
 * protocol number is returned.  Otherwise, a negative error code is
 * returned.
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that a non-first fragment is a special case: "the protocol number
 * of the last header" then means the "next header" field of the Fragment
 * header.  In this case, *offset is meaningless and the fragment offset
 * is stored in *fragoff if fragoff isn't NULL.
 */
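/*
 * Illustrative use (names hypothetical): to locate a TCP header,
 *
 *	int err = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
 *
 * returns IPPROTO_TCP and sets thoff to the header offset on success, or
 * a negative error code.  ip6_packet_match() above passes target == -1
 * to obtain the protocol number of the last header instead.
 */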
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}

EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);