1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
56 do { \
57 if (!(x)) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
60 } while(0)
61 #else
62 #define IP_NF_ASSERT(x)
63 #endif
64
65 #if 0
66 /* All the better to debug you with... */
67 #define static
68 #define inline
69 #endif
70
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 {
73 return xt_alloc_initial_table(ip6t, IP6T);
74 }
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
77 /*
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
83
84 Hence the start of any table is given by get_entry() below. */
85
86 /* Check for an extension */
87 int
88 ip6t_ext_hdr(u8 nexthdr)
89 {
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
97 }
98
99 /* Returns whether the packet matches the rule or not. */
100 /* Performance critical - called for every packet */
101 static inline bool
102 ip6_packet_match(const struct sk_buff *skb,
103 const char *indev,
104 const char *outdev,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
108 {
109 unsigned long ret;
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
111
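/* FWINV(cond, flag) yields cond XORed with whether the given IP6T_INV_*
 * flag is set in ip6info->invflags, so a set invert flag flips the sense
 * of the test (e.g. IP6T_INV_SRCIP turns a source-address match into a
 * mismatch and vice versa). */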
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
113
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
119 /*
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
126 return false;
127 }
128
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
130
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
135 return false;
136 }
137
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
139
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
144 return false;
145 }
146
147 /* ... might want to do something with class and flowlabel here ... */
148
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
151 int protohdr;
152 unsigned short _frag_off;
153
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
155 if (protohdr < 0) {
156 if (_frag_off == 0)
157 *hotdrop = true;
158 return false;
159 }
160 *fragoff = _frag_off;
161
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
163 protohdr,
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
165 ip6info->proto);
166
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
169 return false;
170 }
171 return true;
172 }
173
174 /* We also need a match for '-p all' (proto == 0). */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
177 return false;
178 }
179 return true;
180 }
181
182 /* should be ip6 safe */
183 static bool
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
185 {
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
189 return false;
190 }
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
194 return false;
195 }
196 return true;
197 }
198
199 static unsigned int
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
201 {
202 if (net_ratelimit())
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
205
206 return NF_DROP;
207 }
208
209 /* Performance critical - called for every packet */
210 static inline bool
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
213 {
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
216
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
219 return true;
220 else
221 return false;
222 }
223
224 static inline struct ip6t_entry *
225 get_entry(const void *base, unsigned int offset)
226 {
227 return (struct ip6t_entry *)(base + offset);
228 }
229
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
233 {
234 static const struct ip6t_ip6 uncond;
235
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
237 }
238
239 static inline const struct ip6t_entry_target *
240 ip6t_get_target_c(const struct ip6t_entry *e)
241 {
242 return ip6t_get_target((struct ip6t_entry *)e);
243 }
244
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
254 };
255
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
260 };
261
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
266 };
267
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
270 .u = {
271 .log = {
272 .level = 4,
273 .logflags = NF_LOG_MASK,
274 },
275 },
276 };
277
278 /* Mildly perf critical (only if packet tracing is on) */
279 static inline int
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
283 {
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
285
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
289 (*rulenum) = 0;
290 } else if (s == e) {
291 (*rulenum)++;
292
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
296 t->verdict < 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
302 }
303 return 1;
304 } else
305 (*rulenum)++;
306
307 return 0;
308 }
309
310 static void trace_packet(const struct sk_buff *skb,
311 unsigned int hook,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
317 {
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
323
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
326
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
329
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
333 break;
334
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
338 }
339 #endif
340
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
343 {
344 return (void *)entry + entry->next_offset;
345 }
346
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
348 unsigned int
349 ip6t_do_table(struct sk_buff *skb,
350 unsigned int hook,
351 const struct net_device *in,
352 const struct net_device *out,
353 struct xt_table *table)
354 {
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
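/* Debug-only scratch location: with CONFIG_NETFILTER_DEBUG, the comefrom
 * field of the first entry in this CPU's table copy is reused below as a
 * sentinel (0xeeeeeeec) to catch targets that re-enter the table and still
 * return IP6T_CONTINUE. */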
356
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 bool hotdrop = false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict = NF_DROP;
361 const char *indev, *outdev;
362 const void *table_base;
363 struct ip6t_entry *e, *back;
364 const struct xt_table_info *private;
365 struct xt_match_param mtpar;
366 struct xt_target_param tgpar;
367
368 /* Initialization */
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
376 * match it. */
377 mtpar.hotdrop = &hotdrop;
378 mtpar.in = tgpar.in = in;
379 mtpar.out = tgpar.out = out;
380 mtpar.family = tgpar.family = NFPROTO_IPV6;
381 mtpar.hooknum = tgpar.hooknum = hook;
382
383 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
384
385 xt_info_rdlock_bh();
386 private = table->private;
387 table_base = private->entries[smp_processor_id()];
388
389 e = get_entry(table_base, private->hook_entry[hook]);
390
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
393
394 do {
395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
397
398 IP_NF_ASSERT(e);
399 IP_NF_ASSERT(back);
400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
402 no_match:
403 e = ip6t_next_entry(e);
404 continue;
405 }
406
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
409 goto no_match;
410
411 ADD_COUNTER(e->counters,
412 ntohs(ipv6_hdr(skb)->payload_len) +
413 sizeof(struct ipv6hdr), 1);
414
415 t = ip6t_get_target_c(e);
416 IP_NF_ASSERT(t->u.kernel.target);
417
418 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
419 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
420 /* The packet is traced: log it */
421 if (unlikely(skb->nf_trace))
422 trace_packet(skb, hook, in, out,
423 table->name, private, e);
424 #endif
425 /* Standard target? */
426 if (!t->u.kernel.target->target) {
427 int v;
428
429 v = ((struct ip6t_standard_target *)t)->verdict;
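/* Standard-target verdicts: negative values encode absolute verdicts as
 * -(NF_xxx) - 1 (so NF_DROP is -1, NF_ACCEPT is -2), IP6T_RETURN pops back
 * to the saved return entry, and non-negative values are byte offsets to
 * jump to within the table. */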
430 if (v < 0) {
431 /* Pop from stack? */
432 if (v != IP6T_RETURN) {
433 verdict = (unsigned)(-v) - 1;
434 break;
435 }
436 e = back;
437 back = get_entry(table_base, back->comefrom);
438 continue;
439 }
440 if (table_base + v != ip6t_next_entry(e) &&
441 !(e->ipv6.flags & IP6T_F_GOTO)) {
442 /* Save old back ptr in next entry */
443 struct ip6t_entry *next = ip6t_next_entry(e);
444 next->comefrom = (void *)back - table_base;
445 /* set back pointer to next entry */
446 back = next;
447 }
448
449 e = get_entry(table_base, v);
450 continue;
451 }
452
453 /* Targets which reenter must return
454 abs. verdicts */
455 tgpar.target = t->u.kernel.target;
456 tgpar.targinfo = t->data;
457
458 #ifdef CONFIG_NETFILTER_DEBUG
459 tb_comefrom = 0xeeeeeeec;
460 #endif
461 verdict = t->u.kernel.target->target(skb, &tgpar);
462
463 #ifdef CONFIG_NETFILTER_DEBUG
464 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
465 printk("Target %s reentered!\n",
466 t->u.kernel.target->name);
467 verdict = NF_DROP;
468 }
469 tb_comefrom = 0x57acc001;
470 #endif
471 if (verdict == IP6T_CONTINUE)
472 e = ip6t_next_entry(e);
473 else
474 /* Verdict */
475 break;
476 } while (!hotdrop);
477
478 #ifdef CONFIG_NETFILTER_DEBUG
479 tb_comefrom = NETFILTER_LINK_POISON;
480 #endif
481 xt_info_rdunlock_bh();
482
483 #ifdef DEBUG_ALLOW_ALL
484 return NF_ACCEPT;
485 #else
486 if (hotdrop)
487 return NF_DROP;
488 else return verdict;
489 #endif
490
491 #undef tb_comefrom
492 }
493
494 /* Figures out from what hook each rule can be called: returns 0 if
495 there are loops. Puts hook bitmask in comefrom. */
496 static int
497 mark_source_chains(const struct xt_table_info *newinfo,
498 unsigned int valid_hooks, void *entry0)
499 {
500 unsigned int hook;
501
502 /* No recursion; use packet counter to save back ptrs (reset
503 to 0 as we leave), and comefrom to save source hook bitmask */
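/* The extra (1 << NF_INET_NUMHOOKS) bit in comefrom marks entries on the
 * chain currently being walked; it is cleared as we backtrack, so meeting
 * it again means the ruleset jumps back into itself, i.e. a loop. */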
504 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
505 unsigned int pos = newinfo->hook_entry[hook];
506 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
507
508 if (!(valid_hooks & (1 << hook)))
509 continue;
510
511 /* Set initial back pointer. */
512 e->counters.pcnt = pos;
513
514 for (;;) {
515 const struct ip6t_standard_target *t
516 = (void *)ip6t_get_target_c(e);
517 int visited = e->comefrom & (1 << hook);
518
519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
520 printk("iptables: loop hook %u pos %u %08X.\n",
521 hook, pos, e->comefrom);
522 return 0;
523 }
524 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
525
526 /* Unconditional return/END. */
527 if ((e->target_offset == sizeof(struct ip6t_entry) &&
528 (strcmp(t->target.u.user.name,
529 IP6T_STANDARD_TARGET) == 0) &&
530 t->verdict < 0 &&
531 unconditional(&e->ipv6)) || visited) {
532 unsigned int oldpos, size;
533
534 if ((strcmp(t->target.u.user.name,
535 IP6T_STANDARD_TARGET) == 0) &&
536 t->verdict < -NF_MAX_VERDICT - 1) {
537 duprintf("mark_source_chains: bad "
538 "negative verdict (%i)\n",
539 t->verdict);
540 return 0;
541 }
542
543 /* Return: backtrack through the last
544 big jump. */
545 do {
546 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
547 #ifdef DEBUG_IP_FIREWALL_USER
548 if (e->comefrom
549 & (1 << NF_INET_NUMHOOKS)) {
550 duprintf("Back unset "
551 "on hook %u "
552 "rule %u\n",
553 hook, pos);
554 }
555 #endif
556 oldpos = pos;
557 pos = e->counters.pcnt;
558 e->counters.pcnt = 0;
559
560 /* We're at the start. */
561 if (pos == oldpos)
562 goto next;
563
564 e = (struct ip6t_entry *)
565 (entry0 + pos);
566 } while (oldpos == pos + e->next_offset);
567
568 /* Move along one */
569 size = e->next_offset;
570 e = (struct ip6t_entry *)
571 (entry0 + pos + size);
572 e->counters.pcnt = pos;
573 pos += size;
574 } else {
575 int newpos = t->verdict;
576
577 if (strcmp(t->target.u.user.name,
578 IP6T_STANDARD_TARGET) == 0 &&
579 newpos >= 0) {
580 if (newpos > newinfo->size -
581 sizeof(struct ip6t_entry)) {
582 duprintf("mark_source_chains: "
583 "bad verdict (%i)\n",
584 newpos);
585 return 0;
586 }
587 /* This a jump; chase it. */
588 duprintf("Jump rule %u -> %u\n",
589 pos, newpos);
590 } else {
591 /* ... this is a fallthru */
592 newpos = pos + e->next_offset;
593 }
594 e = (struct ip6t_entry *)
595 (entry0 + newpos);
596 e->counters.pcnt = pos;
597 pos = newpos;
598 }
599 }
600 next:
601 duprintf("Finished chain %u\n", hook);
602 }
603 return 1;
604 }
605
606 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
607 {
608 struct xt_mtdtor_param par;
609
610 par.net = net;
611 par.match = m->u.kernel.match;
612 par.matchinfo = m->data;
613 par.family = NFPROTO_IPV6;
614 if (par.match->destroy != NULL)
615 par.match->destroy(&par);
616 module_put(par.match->me);
617 }
618
619 static int
620 check_entry(const struct ip6t_entry *e, const char *name)
621 {
622 const struct ip6t_entry_target *t;
623
624 if (!ip6_checkentry(&e->ipv6)) {
625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
626 return -EINVAL;
627 }
628
629 if (e->target_offset + sizeof(struct ip6t_entry_target) >
630 e->next_offset)
631 return -EINVAL;
632
633 t = ip6t_get_target_c(e);
634 if (e->target_offset + t->u.target_size > e->next_offset)
635 return -EINVAL;
636
637 return 0;
638 }
639
640 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
641 {
642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
643 int ret;
644
645 par->match = m->u.kernel.match;
646 par->matchinfo = m->data;
647
648 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
649 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
650 if (ret < 0) {
651 duprintf("ip_tables: check failed for `%s'.\n",
652 par->match->name);
653 return ret;
654 }
655 return 0;
656 }
657
658 static int
659 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
660 {
661 struct xt_match *match;
662 int ret;
663
664 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
665 m->u.user.revision),
666 "ip6t_%s", m->u.user.name);
667 if (IS_ERR(match) || !match) {
668 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
669 return match ? PTR_ERR(match) : -ENOENT;
670 }
671 m->u.kernel.match = match;
672
673 ret = check_match(m, par);
674 if (ret)
675 goto err;
676
677 return 0;
678 err:
679 module_put(m->u.kernel.match->me);
680 return ret;
681 }
682
683 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
684 {
685 struct ip6t_entry_target *t = ip6t_get_target(e);
686 struct xt_tgchk_param par = {
687 .net = net,
688 .table = name,
689 .entryinfo = e,
690 .target = t->u.kernel.target,
691 .targinfo = t->data,
692 .hook_mask = e->comefrom,
693 .family = NFPROTO_IPV6,
694 };
695 int ret;
696
697 t = ip6t_get_target(e);
698 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
699 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
700 if (ret < 0) {
701 duprintf("ip_tables: check failed for `%s'.\n",
702 t->u.kernel.target->name);
703 return ret;
704 }
705 return 0;
706 }
707
708 static int
709 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
710 unsigned int size)
711 {
712 struct ip6t_entry_target *t;
713 struct xt_target *target;
714 int ret;
715 unsigned int j;
716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
718
719 ret = check_entry(e, name);
720 if (ret)
721 return ret;
722
723 j = 0;
724 mtpar.net = net;
725 mtpar.table = name;
726 mtpar.entryinfo = &e->ipv6;
727 mtpar.hook_mask = e->comefrom;
728 mtpar.family = NFPROTO_IPV6;
729 xt_ematch_foreach(ematch, e) {
730 ret = find_check_match(ematch, &mtpar);
731 if (ret != 0)
732 goto cleanup_matches;
733 ++j;
734 }
735
736 t = ip6t_get_target(e);
737 target = try_then_request_module(xt_find_target(AF_INET6,
738 t->u.user.name,
739 t->u.user.revision),
740 "ip6t_%s", t->u.user.name);
741 if (IS_ERR(target) || !target) {
742 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
743 ret = target ? PTR_ERR(target) : -ENOENT;
744 goto cleanup_matches;
745 }
746 t->u.kernel.target = target;
747
748 ret = check_target(e, net, name);
749 if (ret)
750 goto err;
751 return 0;
752 err:
753 module_put(t->u.kernel.target->me);
754 cleanup_matches:
755 xt_ematch_foreach(ematch, e) {
756 if (j-- == 0)
757 break;
758 cleanup_match(ematch, net);
759 }
760 return ret;
761 }
762
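/* An underflow entry (the built-in chain policy) must be an unconditional
 * STANDARD target whose verdict decodes, via -v - 1, to NF_ACCEPT or
 * NF_DROP. */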
763 static bool check_underflow(const struct ip6t_entry *e)
764 {
765 const struct ip6t_entry_target *t;
766 unsigned int verdict;
767
768 if (!unconditional(&e->ipv6))
769 return false;
770 t = ip6t_get_target_c(e);
771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
772 return false;
773 verdict = ((struct ip6t_standard_target *)t)->verdict;
774 verdict = -verdict - 1;
775 return verdict == NF_DROP || verdict == NF_ACCEPT;
776 }
777
778 static int
779 check_entry_size_and_hooks(struct ip6t_entry *e,
780 struct xt_table_info *newinfo,
781 const unsigned char *base,
782 const unsigned char *limit,
783 const unsigned int *hook_entries,
784 const unsigned int *underflows,
785 unsigned int valid_hooks)
786 {
787 unsigned int h;
788
789 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
790 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
791 duprintf("Bad offset %p\n", e);
792 return -EINVAL;
793 }
794
795 if (e->next_offset
796 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
797 duprintf("checking: element %p size %u\n",
798 e, e->next_offset);
799 return -EINVAL;
800 }
801
802 /* Check hooks & underflows */
803 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
804 if (!(valid_hooks & (1 << h)))
805 continue;
806 if ((unsigned char *)e - base == hook_entries[h])
807 newinfo->hook_entry[h] = hook_entries[h];
808 if ((unsigned char *)e - base == underflows[h]) {
809 if (!check_underflow(e)) {
810 pr_err("Underflows must be unconditional and "
811 "use the STANDARD target with "
812 "ACCEPT/DROP\n");
813 return -EINVAL;
814 }
815 newinfo->underflow[h] = underflows[h];
816 }
817 }
818
819 /* Clear counters and comefrom */
820 e->counters = ((struct xt_counters) { 0, 0 });
821 e->comefrom = 0;
822 return 0;
823 }
824
825 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
826 {
827 struct xt_tgdtor_param par;
828 struct ip6t_entry_target *t;
829 struct xt_entry_match *ematch;
830
831 /* Cleanup all matches */
832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
834 t = ip6t_get_target(e);
835
836 par.net = net;
837 par.target = t->u.kernel.target;
838 par.targinfo = t->data;
839 par.family = NFPROTO_IPV6;
840 if (par.target->destroy != NULL)
841 par.target->destroy(&par);
842 module_put(par.target->me);
843 }
844
845 /* Checks and translates the user-supplied table segment (held in
846 newinfo) */
847 static int
848 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
849 const struct ip6t_replace *repl)
850 {
851 struct ip6t_entry *iter;
852 unsigned int i;
853 int ret = 0;
854
855 newinfo->size = repl->size;
856 newinfo->number = repl->num_entries;
857
858 /* Init all hooks to impossible value. */
859 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
860 newinfo->hook_entry[i] = 0xFFFFFFFF;
861 newinfo->underflow[i] = 0xFFFFFFFF;
862 }
863
864 duprintf("translate_table: size %u\n", newinfo->size);
865 i = 0;
866 /* Walk through entries, checking offsets. */
867 xt_entry_foreach(iter, entry0, newinfo->size) {
868 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
869 entry0 + repl->size,
870 repl->hook_entry,
871 repl->underflow,
872 repl->valid_hooks);
873 if (ret != 0)
874 return ret;
875 ++i;
876 }
877
878 if (i != repl->num_entries) {
879 duprintf("translate_table: %u not %u entries\n",
880 i, repl->num_entries);
881 return -EINVAL;
882 }
883
884 /* Check hooks all assigned */
885 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
886 /* Only hooks which are valid */
887 if (!(repl->valid_hooks & (1 << i)))
888 continue;
889 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
890 duprintf("Invalid hook entry %u %u\n",
891 i, repl->hook_entry[i]);
892 return -EINVAL;
893 }
894 if (newinfo->underflow[i] == 0xFFFFFFFF) {
895 duprintf("Invalid underflow %u %u\n",
896 i, repl->underflow[i]);
897 return -EINVAL;
898 }
899 }
900
901 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
902 return -ELOOP;
903
904 /* Finally, each sanity check must pass */
905 i = 0;
906 xt_entry_foreach(iter, entry0, newinfo->size) {
907 ret = find_check_entry(iter, net, repl->name, repl->size);
908 if (ret != 0)
909 break;
910 ++i;
911 }
912
913 if (ret != 0) {
914 xt_entry_foreach(iter, entry0, newinfo->size) {
915 if (i-- == 0)
916 break;
917 cleanup_entry(iter, net);
918 }
919 return ret;
920 }
921
922 /* And one copy for every other CPU */
923 for_each_possible_cpu(i) {
924 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
925 memcpy(newinfo->entries[i], entry0, newinfo->size);
926 }
927
928 return ret;
929 }
930
931 static void
932 get_counters(const struct xt_table_info *t,
933 struct xt_counters counters[])
934 {
935 struct ip6t_entry *iter;
936 unsigned int cpu;
937 unsigned int i;
938 unsigned int curcpu;
939
940 /* Instead of clearing (by a previous call to memset())
941 * the counters and using adds, we set the counters
942 * with data used by 'current' CPU
943 *
944 * Bottom half has to be disabled to prevent deadlock
945 * if a new softirq were to run and call ip6t_do_table
946 */
947 local_bh_disable();
948 curcpu = smp_processor_id();
949
950 i = 0;
951 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
952 SET_COUNTER(counters[i], iter->counters.bcnt,
953 iter->counters.pcnt);
954 ++i;
955 }
956
957 for_each_possible_cpu(cpu) {
958 if (cpu == curcpu)
959 continue;
960 i = 0;
961 xt_info_wrlock(cpu);
962 xt_entry_foreach(iter, t->entries[cpu], t->size) {
963 ADD_COUNTER(counters[i], iter->counters.bcnt,
964 iter->counters.pcnt);
965 ++i;
966 }
967 xt_info_wrunlock(cpu);
968 }
969 local_bh_enable();
970 }
971
972 static struct xt_counters *alloc_counters(const struct xt_table *table)
973 {
974 unsigned int countersize;
975 struct xt_counters *counters;
976 const struct xt_table_info *private = table->private;
977
978 /* We need atomic snapshot of counters: rest doesn't change
979 (other than comefrom, which userspace doesn't care
980 about). */
981 countersize = sizeof(struct xt_counters) * private->number;
982 counters = vmalloc_node(countersize, numa_node_id());
983
984 if (counters == NULL)
985 return ERR_PTR(-ENOMEM);
986
987 get_counters(private, counters);
988
989 return counters;
990 }
991
992 static int
993 copy_entries_to_user(unsigned int total_size,
994 const struct xt_table *table,
995 void __user *userptr)
996 {
997 unsigned int off, num;
998 const struct ip6t_entry *e;
999 struct xt_counters *counters;
1000 const struct xt_table_info *private = table->private;
1001 int ret = 0;
1002 const void *loc_cpu_entry;
1003
1004 counters = alloc_counters(table);
1005 if (IS_ERR(counters))
1006 return PTR_ERR(counters);
1007
1008 /* choose the copy that is on our node/cpu, ...
1009 * This choice is lazy (because current thread is
1010 * allowed to migrate to another cpu)
1011 */
1012 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1013 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1014 ret = -EFAULT;
1015 goto free_counters;
1016 }
1017
1018 /* FIXME: use iterator macros --RR */
1019 /* ... then go back and fix counters and names */
1020 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1021 unsigned int i;
1022 const struct ip6t_entry_match *m;
1023 const struct ip6t_entry_target *t;
1024
1025 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1026 if (copy_to_user(userptr + off
1027 + offsetof(struct ip6t_entry, counters),
1028 &counters[num],
1029 sizeof(counters[num])) != 0) {
1030 ret = -EFAULT;
1031 goto free_counters;
1032 }
1033
1034 for (i = sizeof(struct ip6t_entry);
1035 i < e->target_offset;
1036 i += m->u.match_size) {
1037 m = (void *)e + i;
1038
1039 if (copy_to_user(userptr + off + i
1040 + offsetof(struct ip6t_entry_match,
1041 u.user.name),
1042 m->u.kernel.match->name,
1043 strlen(m->u.kernel.match->name)+1)
1044 != 0) {
1045 ret = -EFAULT;
1046 goto free_counters;
1047 }
1048 }
1049
1050 t = ip6t_get_target_c(e);
1051 if (copy_to_user(userptr + off + e->target_offset
1052 + offsetof(struct ip6t_entry_target,
1053 u.user.name),
1054 t->u.kernel.target->name,
1055 strlen(t->u.kernel.target->name)+1) != 0) {
1056 ret = -EFAULT;
1057 goto free_counters;
1058 }
1059 }
1060
1061 free_counters:
1062 vfree(counters);
1063 return ret;
1064 }
1065
1066 #ifdef CONFIG_COMPAT
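/* 32-bit (compat) rule blobs are smaller than the native layout, so jump
 * offsets stored in standard targets must be re-based when crossing the
 * user/kernel boundary; xt_compat_calc_jump() supplies the accumulated
 * size delta up to a given offset. */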
1067 static void compat_standard_from_user(void *dst, const void *src)
1068 {
1069 int v = *(compat_int_t *)src;
1070
1071 if (v > 0)
1072 v += xt_compat_calc_jump(AF_INET6, v);
1073 memcpy(dst, &v, sizeof(v));
1074 }
1075
1076 static int compat_standard_to_user(void __user *dst, const void *src)
1077 {
1078 compat_int_t cv = *(int *)src;
1079
1080 if (cv > 0)
1081 cv -= xt_compat_calc_jump(AF_INET6, cv);
1082 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1083 }
1084
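/* Work out how much smaller this rule is in the compat layout (entry
 * header plus every match and target) and record the delta with
 * xt_compat_add_offset(), adjusting any hook entry points and underflows
 * that lie beyond it. */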
1085 static int compat_calc_entry(const struct ip6t_entry *e,
1086 const struct xt_table_info *info,
1087 const void *base, struct xt_table_info *newinfo)
1088 {
1089 const struct xt_entry_match *ematch;
1090 const struct ip6t_entry_target *t;
1091 unsigned int entry_offset;
1092 int off, i, ret;
1093
1094 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1095 entry_offset = (void *)e - base;
1096 xt_ematch_foreach(ematch, e)
1097 off += xt_compat_match_offset(ematch->u.kernel.match);
1098 t = ip6t_get_target_c(e);
1099 off += xt_compat_target_offset(t->u.kernel.target);
1100 newinfo->size -= off;
1101 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1102 if (ret)
1103 return ret;
1104
1105 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1106 if (info->hook_entry[i] &&
1107 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1108 newinfo->hook_entry[i] -= off;
1109 if (info->underflow[i] &&
1110 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1111 newinfo->underflow[i] -= off;
1112 }
1113 return 0;
1114 }
1115
1116 static int compat_table_info(const struct xt_table_info *info,
1117 struct xt_table_info *newinfo)
1118 {
1119 struct ip6t_entry *iter;
1120 void *loc_cpu_entry;
1121 int ret;
1122
1123 if (!newinfo || !info)
1124 return -EINVAL;
1125
1126 /* we don't care about newinfo->entries[] */
1127 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1128 newinfo->initial_entries = 0;
1129 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1130 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1131 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1132 if (ret != 0)
1133 return ret;
1134 }
1135 return 0;
1136 }
1137 #endif
1138
1139 static int get_info(struct net *net, void __user *user,
1140 const int *len, int compat)
1141 {
1142 char name[IP6T_TABLE_MAXNAMELEN];
1143 struct xt_table *t;
1144 int ret;
1145
1146 if (*len != sizeof(struct ip6t_getinfo)) {
1147 duprintf("length %u != %zu\n", *len,
1148 sizeof(struct ip6t_getinfo));
1149 return -EINVAL;
1150 }
1151
1152 if (copy_from_user(name, user, sizeof(name)) != 0)
1153 return -EFAULT;
1154
1155 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1156 #ifdef CONFIG_COMPAT
1157 if (compat)
1158 xt_compat_lock(AF_INET6);
1159 #endif
1160 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1161 "ip6table_%s", name);
1162 if (t && !IS_ERR(t)) {
1163 struct ip6t_getinfo info;
1164 const struct xt_table_info *private = t->private;
1165 #ifdef CONFIG_COMPAT
1166 struct xt_table_info tmp;
1167
1168 if (compat) {
1169 ret = compat_table_info(private, &tmp);
1170 xt_compat_flush_offsets(AF_INET6);
1171 private = &tmp;
1172 }
1173 #endif
1174 info.valid_hooks = t->valid_hooks;
1175 memcpy(info.hook_entry, private->hook_entry,
1176 sizeof(info.hook_entry));
1177 memcpy(info.underflow, private->underflow,
1178 sizeof(info.underflow));
1179 info.num_entries = private->number;
1180 info.size = private->size;
1181 strcpy(info.name, name);
1182
1183 if (copy_to_user(user, &info, *len) != 0)
1184 ret = -EFAULT;
1185 else
1186 ret = 0;
1187
1188 xt_table_unlock(t);
1189 module_put(t->me);
1190 } else
1191 ret = t ? PTR_ERR(t) : -ENOENT;
1192 #ifdef CONFIG_COMPAT
1193 if (compat)
1194 xt_compat_unlock(AF_INET6);
1195 #endif
1196 return ret;
1197 }
1198
1199 static int
1200 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1201 const int *len)
1202 {
1203 int ret;
1204 struct ip6t_get_entries get;
1205 struct xt_table *t;
1206
1207 if (*len < sizeof(get)) {
1208 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1209 return -EINVAL;
1210 }
1211 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1212 return -EFAULT;
1213 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1214 duprintf("get_entries: %u != %zu\n",
1215 *len, sizeof(get) + get.size);
1216 return -EINVAL;
1217 }
1218
1219 t = xt_find_table_lock(net, AF_INET6, get.name);
1220 if (t && !IS_ERR(t)) {
1221 struct xt_table_info *private = t->private;
1222 duprintf("t->private->number = %u\n", private->number);
1223 if (get.size == private->size)
1224 ret = copy_entries_to_user(private->size,
1225 t, uptr->entrytable);
1226 else {
1227 duprintf("get_entries: I've got %u not %u!\n",
1228 private->size, get.size);
1229 ret = -EAGAIN;
1230 }
1231 module_put(t->me);
1232 xt_table_unlock(t);
1233 } else
1234 ret = t ? PTR_ERR(t) : -ENOENT;
1235
1236 return ret;
1237 }
1238
1239 static int
1240 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1241 struct xt_table_info *newinfo, unsigned int num_counters,
1242 void __user *counters_ptr)
1243 {
1244 int ret;
1245 struct xt_table *t;
1246 struct xt_table_info *oldinfo;
1247 struct xt_counters *counters;
1248 const void *loc_cpu_old_entry;
1249 struct ip6t_entry *iter;
1250
1251 ret = 0;
1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1253 numa_node_id());
1254 if (!counters) {
1255 ret = -ENOMEM;
1256 goto out;
1257 }
1258
1259 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1260 "ip6table_%s", name);
1261 if (!t || IS_ERR(t)) {
1262 ret = t ? PTR_ERR(t) : -ENOENT;
1263 goto free_newinfo_counters_untrans;
1264 }
1265
1266 /* You lied! */
1267 if (valid_hooks != t->valid_hooks) {
1268 duprintf("Valid hook crap: %08X vs %08X\n",
1269 valid_hooks, t->valid_hooks);
1270 ret = -EINVAL;
1271 goto put_module;
1272 }
1273
1274 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1275 if (!oldinfo)
1276 goto put_module;
1277
1278 /* Update module usage count based on number of rules */
1279 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1280 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1281 if ((oldinfo->number > oldinfo->initial_entries) ||
1282 (newinfo->number <= oldinfo->initial_entries))
1283 module_put(t->me);
1284 if ((oldinfo->number > oldinfo->initial_entries) &&
1285 (newinfo->number <= oldinfo->initial_entries))
1286 module_put(t->me);
1287
1288 /* Get the old counters, and synchronize with replace */
1289 get_counters(oldinfo, counters);
1290
1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1294 cleanup_entry(iter, net);
1295
1296 xt_free_table_info(oldinfo);
1297 if (copy_to_user(counters_ptr, counters,
1298 sizeof(struct xt_counters) * num_counters) != 0)
1299 ret = -EFAULT;
1300 vfree(counters);
1301 xt_table_unlock(t);
1302 return ret;
1303
1304 put_module:
1305 module_put(t->me);
1306 xt_table_unlock(t);
1307 free_newinfo_counters_untrans:
1308 vfree(counters);
1309 out:
1310 return ret;
1311 }
1312
1313 static int
1314 do_replace(struct net *net, const void __user *user, unsigned int len)
1315 {
1316 int ret;
1317 struct ip6t_replace tmp;
1318 struct xt_table_info *newinfo;
1319 void *loc_cpu_entry;
1320 struct ip6t_entry *iter;
1321
1322 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1323 return -EFAULT;
1324
1325 /* overflow check */
1326 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1327 return -ENOMEM;
1328
1329 newinfo = xt_alloc_table_info(tmp.size);
1330 if (!newinfo)
1331 return -ENOMEM;
1332
1333 /* choose the copy that is on our node/cpu */
1334 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1335 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1336 tmp.size) != 0) {
1337 ret = -EFAULT;
1338 goto free_newinfo;
1339 }
1340
1341 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1342 if (ret != 0)
1343 goto free_newinfo;
1344
1345 duprintf("ip_tables: Translated table\n");
1346
1347 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1348 tmp.num_counters, tmp.counters);
1349 if (ret)
1350 goto free_newinfo_untrans;
1351 return 0;
1352
1353 free_newinfo_untrans:
1354 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1355 cleanup_entry(iter, net);
1356 free_newinfo:
1357 xt_free_table_info(newinfo);
1358 return ret;
1359 }
1360
1361 static int
1362 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1363 int compat)
1364 {
1365 unsigned int i, curcpu;
1366 struct xt_counters_info tmp;
1367 struct xt_counters *paddc;
1368 unsigned int num_counters;
1369 char *name;
1370 int size;
1371 void *ptmp;
1372 struct xt_table *t;
1373 const struct xt_table_info *private;
1374 int ret = 0;
1375 const void *loc_cpu_entry;
1376 struct ip6t_entry *iter;
1377 #ifdef CONFIG_COMPAT
1378 struct compat_xt_counters_info compat_tmp;
1379
1380 if (compat) {
1381 ptmp = &compat_tmp;
1382 size = sizeof(struct compat_xt_counters_info);
1383 } else
1384 #endif
1385 {
1386 ptmp = &tmp;
1387 size = sizeof(struct xt_counters_info);
1388 }
1389
1390 if (copy_from_user(ptmp, user, size) != 0)
1391 return -EFAULT;
1392
1393 #ifdef CONFIG_COMPAT
1394 if (compat) {
1395 num_counters = compat_tmp.num_counters;
1396 name = compat_tmp.name;
1397 } else
1398 #endif
1399 {
1400 num_counters = tmp.num_counters;
1401 name = tmp.name;
1402 }
1403
1404 if (len != size + num_counters * sizeof(struct xt_counters))
1405 return -EINVAL;
1406
1407 paddc = vmalloc_node(len - size, numa_node_id());
1408 if (!paddc)
1409 return -ENOMEM;
1410
1411 if (copy_from_user(paddc, user + size, len - size) != 0) {
1412 ret = -EFAULT;
1413 goto free;
1414 }
1415
1416 t = xt_find_table_lock(net, AF_INET6, name);
1417 if (!t || IS_ERR(t)) {
1418 ret = t ? PTR_ERR(t) : -ENOENT;
1419 goto free;
1420 }
1421
1422
1423 local_bh_disable();
1424 private = t->private;
1425 if (private->number != num_counters) {
1426 ret = -EINVAL;
1427 goto unlock_up_free;
1428 }
1429
1430 i = 0;
1431 /* Choose the copy that is on our node */
1432 curcpu = smp_processor_id();
1433 xt_info_wrlock(curcpu);
1434 loc_cpu_entry = private->entries[curcpu];
1435 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1436 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1437 ++i;
1438 }
1439 xt_info_wrunlock(curcpu);
1440
1441 unlock_up_free:
1442 local_bh_enable();
1443 xt_table_unlock(t);
1444 module_put(t->me);
1445 free:
1446 vfree(paddc);
1447
1448 return ret;
1449 }
1450
1451 #ifdef CONFIG_COMPAT
1452 struct compat_ip6t_replace {
1453 char name[IP6T_TABLE_MAXNAMELEN];
1454 u32 valid_hooks;
1455 u32 num_entries;
1456 u32 size;
1457 u32 hook_entry[NF_INET_NUMHOOKS];
1458 u32 underflow[NF_INET_NUMHOOKS];
1459 u32 num_counters;
1460 compat_uptr_t counters; /* struct ip6t_counters * */
1461 struct compat_ip6t_entry entries[0];
1462 };
1463
1464 static int
1465 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1466 unsigned int *size, struct xt_counters *counters,
1467 unsigned int i)
1468 {
1469 struct ip6t_entry_target *t;
1470 struct compat_ip6t_entry __user *ce;
1471 u_int16_t target_offset, next_offset;
1472 compat_uint_t origsize;
1473 const struct xt_entry_match *ematch;
1474 int ret = 0;
1475
1476 origsize = *size;
1477 ce = (struct compat_ip6t_entry __user *)*dstptr;
1478 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1479 copy_to_user(&ce->counters, &counters[i],
1480 sizeof(counters[i])) != 0)
1481 return -EFAULT;
1482
1483 *dstptr += sizeof(struct compat_ip6t_entry);
1484 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1485
1486 xt_ematch_foreach(ematch, e) {
1487 ret = xt_compat_match_to_user(ematch, dstptr, size);
1488 if (ret != 0)
1489 return ret;
1490 }
1491 target_offset = e->target_offset - (origsize - *size);
1492 t = ip6t_get_target(e);
1493 ret = xt_compat_target_to_user(t, dstptr, size);
1494 if (ret)
1495 return ret;
1496 next_offset = e->next_offset - (origsize - *size);
1497 if (put_user(target_offset, &ce->target_offset) != 0 ||
1498 put_user(next_offset, &ce->next_offset) != 0)
1499 return -EFAULT;
1500 return 0;
1501 }
1502
1503 static int
1504 compat_find_calc_match(struct ip6t_entry_match *m,
1505 const char *name,
1506 const struct ip6t_ip6 *ipv6,
1507 unsigned int hookmask,
1508 int *size)
1509 {
1510 struct xt_match *match;
1511
1512 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1513 m->u.user.revision),
1514 "ip6t_%s", m->u.user.name);
1515 if (IS_ERR(match) || !match) {
1516 duprintf("compat_check_calc_match: `%s' not found\n",
1517 m->u.user.name);
1518 return match ? PTR_ERR(match) : -ENOENT;
1519 }
1520 m->u.kernel.match = match;
1521 *size += xt_compat_match_offset(match);
1522 return 0;
1523 }
1524
1525 static void compat_release_entry(struct compat_ip6t_entry *e)
1526 {
1527 struct ip6t_entry_target *t;
1528 struct xt_entry_match *ematch;
1529
1530 /* Cleanup all matches */
1531 xt_ematch_foreach(ematch, e)
1532 module_put(ematch->u.kernel.match->me);
1533 t = compat_ip6t_get_target(e);
1534 module_put(t->u.kernel.target->me);
1535 }
1536
1537 static int
1538 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1539 struct xt_table_info *newinfo,
1540 unsigned int *size,
1541 const unsigned char *base,
1542 const unsigned char *limit,
1543 const unsigned int *hook_entries,
1544 const unsigned int *underflows,
1545 const char *name)
1546 {
1547 struct xt_entry_match *ematch;
1548 struct ip6t_entry_target *t;
1549 struct xt_target *target;
1550 unsigned int entry_offset;
1551 unsigned int j;
1552 int ret, off, h;
1553
1554 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1555 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1556 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1557 duprintf("Bad offset %p, limit = %p\n", e, limit);
1558 return -EINVAL;
1559 }
1560
1561 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1562 sizeof(struct compat_xt_entry_target)) {
1563 duprintf("checking: element %p size %u\n",
1564 e, e->next_offset);
1565 return -EINVAL;
1566 }
1567
1568 /* For purposes of check_entry casting the compat entry is fine */
1569 ret = check_entry((struct ip6t_entry *)e, name);
1570 if (ret)
1571 return ret;
1572
1573 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1574 entry_offset = (void *)e - (void *)base;
1575 j = 0;
1576 xt_ematch_foreach(ematch, e) {
1577 ret = compat_find_calc_match(ematch, name,
1578 &e->ipv6, e->comefrom, &off);
1579 if (ret != 0)
1580 goto release_matches;
1581 ++j;
1582 }
1583
1584 t = compat_ip6t_get_target(e);
1585 target = try_then_request_module(xt_find_target(AF_INET6,
1586 t->u.user.name,
1587 t->u.user.revision),
1588 "ip6t_%s", t->u.user.name);
1589 if (IS_ERR(target) || !target) {
1590 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1591 t->u.user.name);
1592 ret = target ? PTR_ERR(target) : -ENOENT;
1593 goto release_matches;
1594 }
1595 t->u.kernel.target = target;
1596
1597 off += xt_compat_target_offset(target);
1598 *size += off;
1599 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1600 if (ret)
1601 goto out;
1602
1603 /* Check hooks & underflows */
1604 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1605 if ((unsigned char *)e - base == hook_entries[h])
1606 newinfo->hook_entry[h] = hook_entries[h];
1607 if ((unsigned char *)e - base == underflows[h])
1608 newinfo->underflow[h] = underflows[h];
1609 }
1610
1611 /* Clear counters and comefrom */
1612 memset(&e->counters, 0, sizeof(e->counters));
1613 e->comefrom = 0;
1614 return 0;
1615
1616 out:
1617 module_put(t->u.kernel.target->me);
1618 release_matches:
1619 xt_ematch_foreach(ematch, e) {
1620 if (j-- == 0)
1621 break;
1622 module_put(ematch->u.kernel.match->me);
1623 }
1624 return ret;
1625 }
1626
1627 static int
1628 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1629 unsigned int *size, const char *name,
1630 struct xt_table_info *newinfo, unsigned char *base)
1631 {
1632 struct ip6t_entry_target *t;
1633 struct xt_target *target;
1634 struct ip6t_entry *de;
1635 unsigned int origsize;
1636 int ret, h;
1637 struct xt_entry_match *ematch;
1638
1639 ret = 0;
1640 origsize = *size;
1641 de = (struct ip6t_entry *)*dstptr;
1642 memcpy(de, e, sizeof(struct ip6t_entry));
1643 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1644
1645 *dstptr += sizeof(struct ip6t_entry);
1646 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1647
1648 xt_ematch_foreach(ematch, e) {
1649 ret = xt_compat_match_from_user(ematch, dstptr, size);
1650 if (ret != 0)
1651 return ret;
1652 }
1653 de->target_offset = e->target_offset - (origsize - *size);
1654 t = compat_ip6t_get_target(e);
1655 target = t->u.kernel.target;
1656 xt_compat_target_from_user(t, dstptr, size);
1657
1658 de->next_offset = e->next_offset - (origsize - *size);
1659 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1660 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1661 newinfo->hook_entry[h] -= origsize - *size;
1662 if ((unsigned char *)de - base < newinfo->underflow[h])
1663 newinfo->underflow[h] -= origsize - *size;
1664 }
1665 return ret;
1666 }
1667
1668 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1669 const char *name)
1670 {
1671 unsigned int j;
1672 int ret = 0;
1673 struct xt_mtchk_param mtpar;
1674 struct xt_entry_match *ematch;
1675
1676 j = 0;
1677 mtpar.net = net;
1678 mtpar.table = name;
1679 mtpar.entryinfo = &e->ipv6;
1680 mtpar.hook_mask = e->comefrom;
1681 mtpar.family = NFPROTO_IPV6;
1682 xt_ematch_foreach(ematch, e) {
1683 ret = check_match(ematch, &mtpar);
1684 if (ret != 0)
1685 goto cleanup_matches;
1686 ++j;
1687 }
1688
1689 ret = check_target(e, net, name);
1690 if (ret)
1691 goto cleanup_matches;
1692 return 0;
1693
1694 cleanup_matches:
1695 xt_ematch_foreach(ematch, e) {
1696 if (j-- == 0)
1697 break;
1698 cleanup_match(ematch, net);
1699 }
1700 return ret;
1701 }
1702
1703 static int
1704 translate_compat_table(struct net *net,
1705 const char *name,
1706 unsigned int valid_hooks,
1707 struct xt_table_info **pinfo,
1708 void **pentry0,
1709 unsigned int total_size,
1710 unsigned int number,
1711 unsigned int *hook_entries,
1712 unsigned int *underflows)
1713 {
1714 unsigned int i, j;
1715 struct xt_table_info *newinfo, *info;
1716 void *pos, *entry0, *entry1;
1717 struct compat_ip6t_entry *iter0;
1718 struct ip6t_entry *iter1;
1719 unsigned int size;
1720 int ret = 0;
1721
1722 info = *pinfo;
1723 entry0 = *pentry0;
1724 size = total_size;
1725 info->number = number;
1726
1727 /* Init all hooks to impossible value. */
1728 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1729 info->hook_entry[i] = 0xFFFFFFFF;
1730 info->underflow[i] = 0xFFFFFFFF;
1731 }
1732
1733 duprintf("translate_compat_table: size %u\n", info->size);
1734 j = 0;
1735 xt_compat_lock(AF_INET6);
1736 /* Walk through entries, checking offsets. */
1737 xt_entry_foreach(iter0, entry0, total_size) {
1738 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1739 entry0,
1740 entry0 + total_size,
1741 hook_entries,
1742 underflows,
1743 name);
1744 if (ret != 0)
1745 goto out_unlock;
1746 ++j;
1747 }
1748
1749 ret = -EINVAL;
1750 if (j != number) {
1751 duprintf("translate_compat_table: %u not %u entries\n",
1752 j, number);
1753 goto out_unlock;
1754 }
1755
1756 /* Check hooks all assigned */
1757 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1758 /* Only hooks which are valid */
1759 if (!(valid_hooks & (1 << i)))
1760 continue;
1761 if (info->hook_entry[i] == 0xFFFFFFFF) {
1762 duprintf("Invalid hook entry %u %u\n",
1763 i, hook_entries[i]);
1764 goto out_unlock;
1765 }
1766 if (info->underflow[i] == 0xFFFFFFFF) {
1767 duprintf("Invalid underflow %u %u\n",
1768 i, underflows[i]);
1769 goto out_unlock;
1770 }
1771 }
1772
1773 ret = -ENOMEM;
1774 newinfo = xt_alloc_table_info(size);
1775 if (!newinfo)
1776 goto out_unlock;
1777
1778 newinfo->number = number;
1779 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1780 newinfo->hook_entry[i] = info->hook_entry[i];
1781 newinfo->underflow[i] = info->underflow[i];
1782 }
1783 entry1 = newinfo->entries[raw_smp_processor_id()];
1784 pos = entry1;
1785 size = total_size;
1786 xt_entry_foreach(iter0, entry0, total_size) {
1787 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1788 name, newinfo, entry1);
1789 if (ret != 0)
1790 break;
1791 }
1792 xt_compat_flush_offsets(AF_INET6);
1793 xt_compat_unlock(AF_INET6);
1794 if (ret)
1795 goto free_newinfo;
1796
1797 ret = -ELOOP;
1798 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1799 goto free_newinfo;
1800
1801 i = 0;
1802 xt_entry_foreach(iter1, entry1, newinfo->size) {
1803 ret = compat_check_entry(iter1, net, name);
1804 if (ret != 0)
1805 break;
1806 ++i;
1807 }
1808 if (ret) {
1809 /*
1810 * The first i matches need cleanup_entry (calls ->destroy)
1811 * because they had called ->check already. The other j-i
1812 * entries need only release.
1813 */
1814 int skip = i;
1815 j -= i;
1816 xt_entry_foreach(iter0, entry0, newinfo->size) {
1817 if (skip-- > 0)
1818 continue;
1819 if (j-- == 0)
1820 break;
1821 compat_release_entry(iter0);
1822 }
1823 xt_entry_foreach(iter1, entry1, newinfo->size) {
1824 if (i-- == 0)
1825 break;
1826 cleanup_entry(iter1, net);
1827 }
1828 xt_free_table_info(newinfo);
1829 return ret;
1830 }
1831
1832 /* And one copy for every other CPU */
1833 for_each_possible_cpu(i)
1834 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1835 memcpy(newinfo->entries[i], entry1, newinfo->size);
1836
1837 *pinfo = newinfo;
1838 *pentry0 = entry1;
1839 xt_free_table_info(info);
1840 return 0;
1841
1842 free_newinfo:
1843 xt_free_table_info(newinfo);
1844 out:
1845 xt_entry_foreach(iter0, entry0, total_size) {
1846 if (j-- == 0)
1847 break;
1848 compat_release_entry(iter0);
1849 }
1850 return ret;
1851 out_unlock:
1852 xt_compat_flush_offsets(AF_INET6);
1853 xt_compat_unlock(AF_INET6);
1854 goto out;
1855 }
1856
1857 static int
1858 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1859 {
1860 int ret;
1861 struct compat_ip6t_replace tmp;
1862 struct xt_table_info *newinfo;
1863 void *loc_cpu_entry;
1864 struct ip6t_entry *iter;
1865
1866 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1867 return -EFAULT;
1868
1869 /* overflow check */
1870 if (tmp.size >= INT_MAX / num_possible_cpus())
1871 return -ENOMEM;
1872 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1873 return -ENOMEM;
1874
1875 newinfo = xt_alloc_table_info(tmp.size);
1876 if (!newinfo)
1877 return -ENOMEM;
1878
1879 /* choose the copy that is on our node/cpu */
1880 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1881 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1882 tmp.size) != 0) {
1883 ret = -EFAULT;
1884 goto free_newinfo;
1885 }
1886
1887 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1888 &newinfo, &loc_cpu_entry, tmp.size,
1889 tmp.num_entries, tmp.hook_entry,
1890 tmp.underflow);
1891 if (ret != 0)
1892 goto free_newinfo;
1893
1894 duprintf("compat_do_replace: Translated table\n");
1895
1896 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1897 tmp.num_counters, compat_ptr(tmp.counters));
1898 if (ret)
1899 goto free_newinfo_untrans;
1900 return 0;
1901
1902 free_newinfo_untrans:
1903 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1904 cleanup_entry(iter, net);
1905 free_newinfo:
1906 xt_free_table_info(newinfo);
1907 return ret;
1908 }
1909
1910 static int
1911 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1912 unsigned int len)
1913 {
1914 int ret;
1915
1916 if (!capable(CAP_NET_ADMIN))
1917 return -EPERM;
1918
1919 switch (cmd) {
1920 case IP6T_SO_SET_REPLACE:
1921 ret = compat_do_replace(sock_net(sk), user, len);
1922 break;
1923
1924 case IP6T_SO_SET_ADD_COUNTERS:
1925 ret = do_add_counters(sock_net(sk), user, len, 1);
1926 break;
1927
1928 default:
1929 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1930 ret = -EINVAL;
1931 }
1932
1933 return ret;
1934 }
1935
1936 struct compat_ip6t_get_entries {
1937 char name[IP6T_TABLE_MAXNAMELEN];
1938 compat_uint_t size;
1939 struct compat_ip6t_entry entrytable[0];
1940 };
1941
1942 static int
1943 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1944 void __user *userptr)
1945 {
1946 struct xt_counters *counters;
1947 const struct xt_table_info *private = table->private;
1948 void __user *pos;
1949 unsigned int size;
1950 int ret = 0;
1951 const void *loc_cpu_entry;
1952 unsigned int i = 0;
1953 struct ip6t_entry *iter;
1954
1955 counters = alloc_counters(table);
1956 if (IS_ERR(counters))
1957 return PTR_ERR(counters);
1958
1959 /* choose the copy that is on our node/cpu, ...
1960 * This choice is lazy (because current thread is
1961 * allowed to migrate to another cpu)
1962 */
1963 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1964 pos = userptr;
1965 size = total_size;
1966 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1967 ret = compat_copy_entry_to_user(iter, &pos,
1968 &size, counters, i++);
1969 if (ret != 0)
1970 break;
1971 }
1972
1973 vfree(counters);
1974 return ret;
1975 }
1976
1977 static int
1978 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1979 int *len)
1980 {
1981 int ret;
1982 struct compat_ip6t_get_entries get;
1983 struct xt_table *t;
1984
1985 if (*len < sizeof(get)) {
1986 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1987 return -EINVAL;
1988 }
1989
1990 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1991 return -EFAULT;
1992
1993 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1994 duprintf("compat_get_entries: %u != %zu\n",
1995 *len, sizeof(get) + get.size);
1996 return -EINVAL;
1997 }
1998
1999 xt_compat_lock(AF_INET6);
2000 t = xt_find_table_lock(net, AF_INET6, get.name);
2001 if (t && !IS_ERR(t)) {
2002 const struct xt_table_info *private = t->private;
2003 struct xt_table_info info;
2004 duprintf("t->private->number = %u\n", private->number);
2005 ret = compat_table_info(private, &info);
2006 if (!ret && get.size == info.size) {
2007 ret = compat_copy_entries_to_user(private->size,
2008 t, uptr->entrytable);
2009 } else if (!ret) {
2010 duprintf("compat_get_entries: I've got %u not %u!\n",
2011 private->size, get.size);
2012 ret = -EAGAIN;
2013 }
2014 xt_compat_flush_offsets(AF_INET6);
2015 module_put(t->me);
2016 xt_table_unlock(t);
2017 } else
2018 ret = t ? PTR_ERR(t) : -ENOENT;
2019
2020 xt_compat_unlock(AF_INET6);
2021 return ret;
2022 }
2023
2024 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2025
2026 static int
2027 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2028 {
2029 int ret;
2030
2031 if (!capable(CAP_NET_ADMIN))
2032 return -EPERM;
2033
2034 switch (cmd) {
2035 case IP6T_SO_GET_INFO:
2036 ret = get_info(sock_net(sk), user, len, 1);
2037 break;
2038 case IP6T_SO_GET_ENTRIES:
2039 ret = compat_get_entries(sock_net(sk), user, len);
2040 break;
2041 default:
2042 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2043 }
2044 return ret;
2045 }
2046 #endif
2047
2048 static int
2049 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2050 {
2051 int ret;
2052
2053 if (!capable(CAP_NET_ADMIN))
2054 return -EPERM;
2055
2056 switch (cmd) {
2057 case IP6T_SO_SET_REPLACE:
2058 ret = do_replace(sock_net(sk), user, len);
2059 break;
2060
2061 case IP6T_SO_SET_ADD_COUNTERS:
2062 ret = do_add_counters(sock_net(sk), user, len, 0);
2063 break;
2064
2065 default:
2066 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2067 ret = -EINVAL;
2068 }
2069
2070 return ret;
2071 }
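
/*
 * Editor's illustrative sketch (not part of the original source): roughly how
 * a privileged userspace program could drive IP6T_SO_SET_ADD_COUNTERS as
 * handled above.  Assumptions: fd is a raw AF_INET6 socket opened by a
 * CAP_NET_ADMIN process, example_add_counters() is a made-up name, and the
 * usual xt_counters_info layout applies; error handling is mostly omitted.
 */
#if 0
#include <string.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int example_add_counters(int fd, unsigned int num_rules)
{
	/* One xt_counters (packet/byte delta) per rule, in rule order. */
	size_t len = sizeof(struct xt_counters_info)
		     + num_rules * sizeof(struct xt_counters);
	struct xt_counters_info *ci = calloc(1, len);
	int ret;

	if (!ci)
		return -1;
	strcpy(ci->name, "filter");	/* table to update */
	ci->num_counters = num_rules;	/* must equal the table's rule count */
	/* ci->counters[i].pcnt / .bcnt hold the deltas to add to rule i. */

	ret = setsockopt(fd, IPPROTO_IPV6, IP6T_SO_SET_ADD_COUNTERS, ci, len);
	free(ci);
	return ret;
}
#endif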
2072
2073 static int
2074 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2075 {
2076 int ret;
2077
2078 if (!capable(CAP_NET_ADMIN))
2079 return -EPERM;
2080
2081 switch (cmd) {
2082 case IP6T_SO_GET_INFO:
2083 ret = get_info(sock_net(sk), user, len, 0);
2084 break;
2085
2086 case IP6T_SO_GET_ENTRIES:
2087 ret = get_entries(sock_net(sk), user, len);
2088 break;
2089
2090 case IP6T_SO_GET_REVISION_MATCH:
2091 case IP6T_SO_GET_REVISION_TARGET: {
2092 struct ip6t_get_revision rev;
2093 int target;
2094
2095 if (*len != sizeof(rev)) {
2096 ret = -EINVAL;
2097 break;
2098 }
2099 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2100 ret = -EFAULT;
2101 break;
2102 }
2103
2104 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2105 target = 1;
2106 else
2107 target = 0;
2108
2109 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2110 rev.revision,
2111 target, &ret),
2112 "ip6t_%s", rev.name);
2113 break;
2114 }
2115
2116 default:
2117 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2118 ret = -EINVAL;
2119 }
2120
2121 return ret;
2122 }
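
/*
 * Editor's illustrative sketch (not part of the original source): the
 * two-step read protocol the GET handlers above (and their compat
 * counterparts) expect from userspace.  IP6T_SO_GET_INFO reports the table
 * geometry; IP6T_SO_GET_ENTRIES must then be called with exactly
 * sizeof(struct ip6t_get_entries) + info.size bytes, and an EAGAIN means
 * the table was replaced in between, so the caller starts over.
 * Assumptions: fd is a raw AF_INET6 socket, dump_filter_table() is a
 * made-up name, and error handling is mostly omitted.
 */
#if 0
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static struct ip6t_get_entries *dump_filter_table(int fd)
{
	struct ip6t_getinfo info;
	struct ip6t_get_entries *entries;
	socklen_t len;

again:
	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");
	len = sizeof(info);
	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO, &info, &len) < 0)
		return NULL;

	len = sizeof(*entries) + info.size;
	entries = calloc(1, len);
	if (!entries)
		return NULL;
	strcpy(entries->name, "filter");
	entries->size = info.size;
	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_ENTRIES, entries, &len) < 0) {
		free(entries);
		if (errno == EAGAIN)	/* table changed underneath us, retry */
			goto again;
		return NULL;
	}
	/* entries->entrytable[] now holds info.size bytes of ip6t_entry rules. */
	return entries;
}
#endif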
2123
2124 struct xt_table *ip6t_register_table(struct net *net,
2125 const struct xt_table *table,
2126 const struct ip6t_replace *repl)
2127 {
2128 int ret;
2129 struct xt_table_info *newinfo;
2130 struct xt_table_info bootstrap
2131 = { 0, 0, 0, { 0 }, { 0 }, { } };
2132 void *loc_cpu_entry;
2133 struct xt_table *new_table;
2134
2135 newinfo = xt_alloc_table_info(repl->size);
2136 if (!newinfo) {
2137 ret = -ENOMEM;
2138 goto out;
2139 }
2140
2141 /* choose the copy on our node/cpu, but don't care about preemption */
2142 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2143 memcpy(loc_cpu_entry, repl->entries, repl->size);
2144
2145 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2146 if (ret != 0)
2147 goto out_free;
2148
2149 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2150 if (IS_ERR(new_table)) {
2151 ret = PTR_ERR(new_table);
2152 goto out_free;
2153 }
2154 return new_table;
2155
2156 out_free:
2157 xt_free_table_info(newinfo);
2158 out:
2159 return ERR_PTR(ret);
2160 }
2161
2162 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2163 {
2164 struct xt_table_info *private;
2165 void *loc_cpu_entry;
2166 struct module *table_owner = table->me;
2167 struct ip6t_entry *iter;
2168
2169 private = xt_unregister_table(table);
2170
2171 /* Decrease module usage counts and free resources */
2172 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2173 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2174 cleanup_entry(iter, net);
2175 if (private->number > private->initial_entries)
2176 module_put(table_owner);
2177 xt_free_table_info(private);
2178 }
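
/*
 * Editor's illustrative sketch (not part of the original source): roughly how
 * a table module uses the two helpers above from its pernet init/exit hooks,
 * loosely modeled on ip6table_filter.  Names prefixed example_ are made up;
 * ip6t_alloc_initial_table() builds the initial replace blob described by
 * the xt_table, and the registered table is stashed where the exit hook
 * (and the netfilter hook functions) can find it.
 */
#if 0
static const struct xt_table example_packet_filter = {
	.name		= "filter",
	.valid_hooks	= (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) |
			  (1 << NF_INET_LOCAL_OUT),
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
};

static int __net_init example_net_init(struct net *net)
{
	struct ip6t_replace *repl;
	struct xt_table *t;

	repl = ip6t_alloc_initial_table(&example_packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	t = ip6t_register_table(net, &example_packet_filter, repl);
	kfree(repl);			/* the table keeps its own copy */
	if (IS_ERR(t))
		return PTR_ERR(t);
	net->ipv6.ip6table_filter = t;	/* where ip6table_filter stashes it */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_filter);
}
#endif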
2179
2180 /* Returns true if the type and code are matched by the range, false otherwise */
2181 static inline bool
2182 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2183 u_int8_t type, u_int8_t code,
2184 bool invert)
2185 {
2186 return (type == test_type && code >= min_code && code <= max_code)
2187 ^ invert;
2188 }
2189
2190 static bool
2191 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2192 {
2193 const struct icmp6hdr *ic;
2194 struct icmp6hdr _icmph;
2195 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2196
2197 /* Must not be a fragment. */
2198 if (par->fragoff != 0)
2199 return false;
2200
2201 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2202 if (ic == NULL) {
2203 /* We've been asked to examine this packet, and we
2204 * can't. Hence, no choice but to drop.
2205 */
2206 duprintf("Dropping evil ICMPv6 tinygram.\n");
2207 *par->hotdrop = true;
2208 return false;
2209 }
2210
2211 return icmp6_type_code_match(icmpinfo->type,
2212 icmpinfo->code[0],
2213 icmpinfo->code[1],
2214 ic->icmp6_type, ic->icmp6_code,
2215 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2216 }
2217
2218 /* Called when user tries to insert an entry of this type. */
2219 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2220 {
2221 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2222
2223 /* Must specify no unknown invflags */
2224 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2225 }
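
/*
 * Editor's illustrative sketch (not part of the original source): how the
 * type/code range in struct ip6t_icmp feeds icmp6_type_code_match() above.
 * The values assume the usual ip6tables convention of storing the full
 * 0..0xff code range when only "--icmpv6-type 128" is given;
 * example_matches_echo_request() is a made-up name.
 */
#if 0
static bool example_matches_echo_request(void)
{
	const struct ip6t_icmp echo_req = {
		.type	  = ICMPV6_ECHO_REQUEST,	/* 128 */
		.code	  = { 0, 0xff },		/* any code */
		.invflags = 0,
	};

	/* An ICMPv6 echo request (type 128, code 0) matches this rule. */
	return icmp6_type_code_match(echo_req.type,
				     echo_req.code[0], echo_req.code[1],
				     ICMPV6_ECHO_REQUEST, 0,
				     !!(echo_req.invflags & IP6T_ICMP_INV));
}
#endif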
2226
2227 /* The built-in targets: standard (NULL) and error. */
2228 static struct xt_target ip6t_standard_target __read_mostly = {
2229 .name = IP6T_STANDARD_TARGET,
2230 .targetsize = sizeof(int),
2231 .family = NFPROTO_IPV6,
2232 #ifdef CONFIG_COMPAT
2233 .compatsize = sizeof(compat_int_t),
2234 .compat_from_user = compat_standard_from_user,
2235 .compat_to_user = compat_standard_to_user,
2236 #endif
2237 };
2238
2239 static struct xt_target ip6t_error_target __read_mostly = {
2240 .name = IP6T_ERROR_TARGET,
2241 .target = ip6t_error,
2242 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2243 .family = NFPROTO_IPV6,
2244 };
2245
2246 static struct nf_sockopt_ops ip6t_sockopts = {
2247 .pf = PF_INET6,
2248 .set_optmin = IP6T_BASE_CTL,
2249 .set_optmax = IP6T_SO_SET_MAX+1,
2250 .set = do_ip6t_set_ctl,
2251 #ifdef CONFIG_COMPAT
2252 .compat_set = compat_do_ip6t_set_ctl,
2253 #endif
2254 .get_optmin = IP6T_BASE_CTL,
2255 .get_optmax = IP6T_SO_GET_MAX+1,
2256 .get = do_ip6t_get_ctl,
2257 #ifdef CONFIG_COMPAT
2258 .compat_get = compat_do_ip6t_get_ctl,
2259 #endif
2260 .owner = THIS_MODULE,
2261 };
2262
2263 static struct xt_match icmp6_matchstruct __read_mostly = {
2264 .name = "icmp6",
2265 .match = icmp6_match,
2266 .matchsize = sizeof(struct ip6t_icmp),
2267 .checkentry = icmp6_checkentry,
2268 .proto = IPPROTO_ICMPV6,
2269 .family = NFPROTO_IPV6,
2270 };
2271
2272 static int __net_init ip6_tables_net_init(struct net *net)
2273 {
2274 return xt_proto_init(net, NFPROTO_IPV6);
2275 }
2276
2277 static void __net_exit ip6_tables_net_exit(struct net *net)
2278 {
2279 xt_proto_fini(net, NFPROTO_IPV6);
2280 }
2281
2282 static struct pernet_operations ip6_tables_net_ops = {
2283 .init = ip6_tables_net_init,
2284 .exit = ip6_tables_net_exit,
2285 };
2286
2287 static int __init ip6_tables_init(void)
2288 {
2289 int ret;
2290
2291 ret = register_pernet_subsys(&ip6_tables_net_ops);
2292 if (ret < 0)
2293 goto err1;
2294
2295 /* No one else will be downing the sem now, so we won't sleep */
2296 ret = xt_register_target(&ip6t_standard_target);
2297 if (ret < 0)
2298 goto err2;
2299 ret = xt_register_target(&ip6t_error_target);
2300 if (ret < 0)
2301 goto err3;
2302 ret = xt_register_match(&icmp6_matchstruct);
2303 if (ret < 0)
2304 goto err4;
2305
2306 /* Register setsockopt */
2307 ret = nf_register_sockopt(&ip6t_sockopts);
2308 if (ret < 0)
2309 goto err5;
2310
2311 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2312 return 0;
2313
2314 err5:
2315 xt_unregister_match(&icmp6_matchstruct);
2316 err4:
2317 xt_unregister_target(&ip6t_error_target);
2318 err3:
2319 xt_unregister_target(&ip6t_standard_target);
2320 err2:
2321 unregister_pernet_subsys(&ip6_tables_net_ops);
2322 err1:
2323 return ret;
2324 }
2325
2326 static void __exit ip6_tables_fini(void)
2327 {
2328 nf_unregister_sockopt(&ip6t_sockopts);
2329
2330 xt_unregister_match(&icmp6_matchstruct);
2331 xt_unregister_target(&ip6t_error_target);
2332 xt_unregister_target(&ip6t_standard_target);
2333
2334 unregister_pernet_subsys(&ip6_tables_net_ops);
2335 }
2336
2337 /*
2338 * Find the offset to the specified header, or the protocol number of the
2339 * last header if target < 0. "Last header" means the transport protocol
2340 * header, ESP, or "No next header".
2341 *
2342 * If the target header is found, its offset is stored in *offset and the
2343 * protocol number is returned. Otherwise a negative error code (-ENOENT
2344 * or -EBADMSG) is returned.
2345 *
2346 * If the first fragment doesn't contain the final protocol header or
2347 * NEXTHDR_NONE, it is considered invalid.
2348 *
2349 * Note that a non-first fragment is a special case in which "the protocol
2350 * number of the last header" is the "next header" field of the Fragment
2351 * header. In that case *offset is meaningless and the fragment offset is
2352 * stored in *fragoff if fragoff isn't NULL.
2353 */
2354 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2355 int target, unsigned short *fragoff)
2356 {
2357 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2358 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2359 unsigned int len = skb->len - start;
2360
2361 if (fragoff)
2362 *fragoff = 0;
2363
2364 while (nexthdr != target) {
2365 struct ipv6_opt_hdr _hdr, *hp;
2366 unsigned int hdrlen;
2367
2368 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2369 if (target < 0)
2370 break;
2371 return -ENOENT;
2372 }
2373
2374 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2375 if (hp == NULL)
2376 return -EBADMSG;
2377 if (nexthdr == NEXTHDR_FRAGMENT) {
2378 unsigned short _frag_off;
2379 __be16 *fp;
2380 fp = skb_header_pointer(skb,
2381 start+offsetof(struct frag_hdr,
2382 frag_off),
2383 sizeof(_frag_off),
2384 &_frag_off);
2385 if (fp == NULL)
2386 return -EBADMSG;
2387
2388 _frag_off = ntohs(*fp) & ~0x7;
2389 if (_frag_off) {
2390 if (target < 0 &&
2391 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2392 hp->nexthdr == NEXTHDR_NONE)) {
2393 if (fragoff)
2394 *fragoff = _frag_off;
2395 return hp->nexthdr;
2396 }
2397 return -ENOENT;
2398 }
2399 hdrlen = 8;
2400 } else if (nexthdr == NEXTHDR_AUTH)
2401 hdrlen = (hp->hdrlen + 2) << 2;
2402 else
2403 hdrlen = ipv6_optlen(hp);
2404
2405 nexthdr = hp->nexthdr;
2406 len -= hdrlen;
2407 start += hdrlen;
2408 }
2409
2410 *offset = start;
2411 return nexthdr;
2412 }
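
/*
 * Editor's illustrative sketch (not part of the original source): a typical
 * way a caller might use ipv6_find_hdr() to locate the TCP header, per the
 * comment above.  A negative return means error or "not found"; on success
 * *thoff is the offset of the requested header.  example_find_tcp() is a
 * made-up name and buf is caller-provided scratch space.
 */
#if 0
static const struct tcphdr *example_find_tcp(const struct sk_buff *skb,
					     struct tcphdr *buf)
{
	unsigned int thoff;

	if (ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL) != IPPROTO_TCP)
		return NULL;		/* fragment, error, or not TCP */

	return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
}
#endif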
2413
2414 EXPORT_SYMBOL(ip6t_register_table);
2415 EXPORT_SYMBOL(ip6t_unregister_table);
2416 EXPORT_SYMBOL(ip6t_do_table);
2417 EXPORT_SYMBOL(ip6t_ext_hdr);
2418 EXPORT_SYMBOL(ipv6_find_hdr);
2419
2420 module_init(ip6_tables_init);
2421 module_exit(ip6_tables_fini);