/*
 * net/ipv6/netfilter/ip6_tables.c
 * (mirror snapshot: git.proxmox.com / mirror_ubuntu-zesty-kernel, at commit
 *  "netfilter: xtables: change xt_match.checkentry return type")
 */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/* Compile-time debug switches; all off in production builds. */
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: packet-path debug output (DEBUG_IP_FIREWALL). */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf: user/config-path debug output (DEBUG_IP_FIREWALL_USER). */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Soft assertion: logs location but does not stop execution. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
70
/*
 * Allocate and populate an initial (empty, policy-only) replacement blob
 * for an ip6_tables table, using the boilerplate from xt_repldata.h.
 * Note: ip6t/IP6T are token-pasted inside the macro, not identifiers.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
77 /*
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
83
84 Hence the start of any table is given by get_table() below. */
85
86 /* Check for an extension */
87 int
88 ip6t_ext_hdr(u8 nexthdr)
89 {
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
97 }
98
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* FWINV: evaluate a test and flip the result if the rule's
 * corresponding IP6T_INV_* inversion flag is set. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison (with inversion). */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface comparison (mask permits wildcard suffixes). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface comparison. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain for the upper-layer
		 * protocol; also reports the fragment offset. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Malformed non-fragment: have the caller drop it. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
181
182 /* should be ip6 safe */
183 static bool
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
185 {
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
189 return false;
190 }
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
194 return false;
195 }
196 return true;
197 }
198
/*
 * Target handler for the built-in ERROR target. Should only be reached
 * for corrupt/unexpected rulesets; logs (ratelimited) and drops.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
207
208 /* Performance critical - called for every packet */
209 static inline bool
210 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
211 struct xt_match_param *par)
212 {
213 par->match = m->u.kernel.match;
214 par->matchinfo = m->data;
215
216 /* Stop iteration if it doesn't match */
217 if (!m->u.kernel.match->match(skb, par))
218 return true;
219 else
220 return false;
221 }
222
/* Return the rule entry located @offset bytes into the blob at @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
228
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* static const => zero-filled reference object to compare against */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
237
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
243
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Human-readable hook names used in TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of position a traced rule occupies in its chain. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for all TRACE output. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
276
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): called for each entry @s until the
 * matched entry @e is reached. Updates the current chain name (ERROR
 * targets mark user-chain heads), the rule number within that chain,
 * and the comment kind. Returns 1 ("stop") once @s == @e, else 0.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IP6T_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
308
/*
 * Emit a "TRACE:" log line naming table/chain/comment/rulenum for the
 * entry @e that matched a packet flagged with skb->nf_trace.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* Scan this CPU's table copy from the hook's entry point to @e. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
339
/* Advance to the entry that follows @entry in the table blob. */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
345
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
/* comefrom field of the entry at the base of this CPU's table copy,
 * (ab)used below as scratch space for reentrancy debugging. */
#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, *back;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV6;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Per-CPU rule copies: recursive read lock is enough here. */
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		const struct ip6t_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All match extensions must succeed for the rule to fire. */
		xt_ematch_foreach(ematch, e)
			if (do_match(ematch, skb, &mtpar) != 0)
				goto no_match;

		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					/* Absolute verdict (v encodes it
					 * as -verdict - 1). */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ip6t_entry *next = ip6t_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		/* Sentinel: detect a target reentering this function. */
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);

#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	tb_comefrom = NETFILTER_LINK_POISON;
#endif
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
492
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ip6t_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "currently on the
			 * walk stack": seeing it again means a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					/* pcnt was borrowed as the saved
					 * back-pointer; restore it to 0. */
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
604
605 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
606 {
607 struct xt_mtdtor_param par;
608
609 par.net = net;
610 par.match = m->u.kernel.match;
611 par.matchinfo = m->data;
612 par.family = NFPROTO_IPV6;
613 if (par.match->destroy != NULL)
614 par.match->destroy(&par);
615 module_put(par.match->me);
616 }
617
/*
 * Structural sanity check of a single rule: validates the generic IPv6
 * part and that the target record lies entirely between target_offset
 * and next_offset. Returns 0 or -EINVAL.
 */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct ip6t_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* The target record must start within the entry... */
	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target_c(e);
	/* ...and its declared size must not run past the entry's end. */
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
638
639 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
640 {
641 const struct ip6t_ip6 *ipv6 = par->entryinfo;
642 int ret;
643
644 par->match = m->u.kernel.match;
645 par->matchinfo = m->data;
646
647 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
648 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
649 if (ret < 0) {
650 duprintf("ip_tables: check failed for `%s'.\n",
651 par.match->name);
652 return ret;
653 }
654 return 0;
655 }
656
657 static int
658 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
659 {
660 struct xt_match *match;
661 int ret;
662
663 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
664 m->u.user.revision);
665 if (IS_ERR(match)) {
666 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
667 return PTR_ERR(match);
668 }
669 m->u.kernel.match = match;
670
671 ret = check_match(m, par);
672 if (ret)
673 goto err;
674
675 return 0;
676 err:
677 module_put(m->u.kernel.match->me);
678 return ret;
679 }
680
681 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
682 {
683 struct ip6t_entry_target *t = ip6t_get_target(e);
684 struct xt_tgchk_param par = {
685 .net = net,
686 .table = name,
687 .entryinfo = e,
688 .target = t->u.kernel.target,
689 .targinfo = t->data,
690 .hook_mask = e->comefrom,
691 .family = NFPROTO_IPV6,
692 };
693 int ret;
694
695 t = ip6t_get_target(e);
696 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
697 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
698 if (ret < 0) {
699 duprintf("ip_tables: check failed for `%s'.\n",
700 t->u.kernel.target->name);
701 return ret;
702 }
703 return 0;
704 }
705
/*
 * Look up and validate every match extension and the target of one
 * rule. On any failure, all module references and extension state
 * acquired so far are released before returning the error.
 * NOTE(review): @size is unused in this function — presumably kept for
 * signature symmetry with the ipv4 code; confirm before removing.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts successfully checked matches, so the cleanup path
	 * unwinds exactly that many. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
758
759 static bool check_underflow(const struct ip6t_entry *e)
760 {
761 const struct ip6t_entry_target *t;
762 unsigned int verdict;
763
764 if (!unconditional(&e->ipv6))
765 return false;
766 t = ip6t_get_target_c(e);
767 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
768 return false;
769 verdict = ((struct ip6t_standard_target *)t)->verdict;
770 verdict = -verdict - 1;
771 return verdict == NF_DROP || verdict == NF_ACCEPT;
772 }
773
/*
 * Validate one entry's alignment and size against the blob bounds, and
 * record any hook entry points / underflows that land on this entry.
 * Also zeroes counters and comefrom ready for translation.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* An entry must at least hold its header plus a target record. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
820
821 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
822 {
823 struct xt_tgdtor_param par;
824 struct ip6t_entry_target *t;
825 struct xt_entry_match *ematch;
826
827 /* Cleanup all matches */
828 xt_ematch_foreach(ematch, e)
829 cleanup_match(ematch, net);
830 t = ip6t_get_target(e);
831
832 par.net = net;
833 par.target = t->u.kernel.target;
834 par.targinfo = t->data;
835 par.family = NFPROTO_IPV6;
836 if (par.target->destroy != NULL)
837 par.target->destroy(&par);
838 module_put(par.target->me);
839 }
840
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Reject rulesets containing chain loops or bad jumps. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that passed find_check_entry. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
926
/*
 * Snapshot the per-rule counters, summed over every CPU's table copy,
 * into @counters (caller-allocated, t->number elements).
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		/* Exclude concurrent updaters of that CPU's copy. */
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
967
968 static struct xt_counters *alloc_counters(const struct xt_table *table)
969 {
970 unsigned int countersize;
971 struct xt_counters *counters;
972 const struct xt_table_info *private = table->private;
973
974 /* We need atomic snapshot of counters: rest doesn't change
975 (other than comefrom, which userspace doesn't care
976 about). */
977 countersize = sizeof(struct xt_counters) * private->number;
978 counters = vmalloc_node(countersize, numa_node_id());
979
980 if (counters == NULL)
981 return ERR_PTR(-ENOMEM);
982
983 get_counters(private, counters);
984
985 return counters;
986 }
987
/*
 * Copy the table blob to userspace, then patch in the current counter
 * values and replace kernel match/target pointers with their names.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Overwrite each match's name field with the extension's
		 * user-visible name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Likewise for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1061
1062 #ifdef CONFIG_COMPAT
/* compat: convert a standard-target verdict arriving from 32-bit
 * userspace; positive verdicts are jump offsets and must be shifted by
 * the accumulated compat size delta. */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
}
1071
/* compat: convert a standard-target verdict for 32-bit userspace;
 * inverse of compat_standard_from_user(). Returns 0 or -EFAULT. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
1080
/*
 * Compute how much smaller entry @e is in the compat (32-bit) layout,
 * register the per-entry offset delta, and shrink newinfo's size and
 * any hook/underflow offsets that lie beyond @e accordingly.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Hook/underflow offsets after @e shift down by @off. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1111
/*
 * Build the compat-sized view of @info in @newinfo: sizes and hook
 * offsets translated to the 32-bit layout (entries[] left untouched).
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
1134
/*
 * Handler for the GET_INFO socket option: copy table metadata (hook
 * entry points, underflows, entry count, blob size) to userspace.
 * With @compat set, sizes/offsets reported are those of the 32-bit
 * layout.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IP6T_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Force NUL termination of the userspace-supplied name. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* NOTE(review): a compat_table_info() error is
			 * overwritten by the copy_to_user() result below;
			 * matches the code of this era — verify upstream. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1194
/*
 * IP6T_SO_GET_ENTRIES handler: dump the whole rule blob (plus counters)
 * of the named table to userland. The caller must have asked for exactly
 * the current blob size (learned via get_info); a size mismatch returns
 * -EAGAIN so userland can retry after refetching the info.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header + the advertised blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced since get_info; let user retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1234
/*
 * Common tail of table replacement (native and compat): swap @newinfo in
 * for the named table, snapshot the old counters into @counters_ptr, and
 * release the old rule blob.
 *
 * On success ownership of @newinfo passes to the xtables core; on failure
 * the caller still owns (and must free) @newinfo. The module refcount
 * juggling below keeps the table module pinned iff user-defined rules
 * (beyond the built-in chains) exist.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop the reference taken by this replace... */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	/* ...and the one held for the old user-defined ruleset, if the new
	 * ruleset no longer has user-defined rules. */
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* A failed copy here still leaves the new table installed. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1308
1309 static int
1310 do_replace(struct net *net, const void __user *user, unsigned int len)
1311 {
1312 int ret;
1313 struct ip6t_replace tmp;
1314 struct xt_table_info *newinfo;
1315 void *loc_cpu_entry;
1316 struct ip6t_entry *iter;
1317
1318 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1319 return -EFAULT;
1320
1321 /* overflow check */
1322 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1323 return -ENOMEM;
1324
1325 newinfo = xt_alloc_table_info(tmp.size);
1326 if (!newinfo)
1327 return -ENOMEM;
1328
1329 /* choose the copy that is on our node/cpu */
1330 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1331 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1332 tmp.size) != 0) {
1333 ret = -EFAULT;
1334 goto free_newinfo;
1335 }
1336
1337 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1338 if (ret != 0)
1339 goto free_newinfo;
1340
1341 duprintf("ip_tables: Translated table\n");
1342
1343 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1344 tmp.num_counters, tmp.counters);
1345 if (ret)
1346 goto free_newinfo_untrans;
1347 return 0;
1348
1349 free_newinfo_untrans:
1350 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1351 cleanup_entry(iter, net);
1352 free_newinfo:
1353 xt_free_table_info(newinfo);
1354 return ret;
1355 }
1356
1357 static int
1358 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1359 int compat)
1360 {
1361 unsigned int i, curcpu;
1362 struct xt_counters_info tmp;
1363 struct xt_counters *paddc;
1364 unsigned int num_counters;
1365 char *name;
1366 int size;
1367 void *ptmp;
1368 struct xt_table *t;
1369 const struct xt_table_info *private;
1370 int ret = 0;
1371 const void *loc_cpu_entry;
1372 struct ip6t_entry *iter;
1373 #ifdef CONFIG_COMPAT
1374 struct compat_xt_counters_info compat_tmp;
1375
1376 if (compat) {
1377 ptmp = &compat_tmp;
1378 size = sizeof(struct compat_xt_counters_info);
1379 } else
1380 #endif
1381 {
1382 ptmp = &tmp;
1383 size = sizeof(struct xt_counters_info);
1384 }
1385
1386 if (copy_from_user(ptmp, user, size) != 0)
1387 return -EFAULT;
1388
1389 #ifdef CONFIG_COMPAT
1390 if (compat) {
1391 num_counters = compat_tmp.num_counters;
1392 name = compat_tmp.name;
1393 } else
1394 #endif
1395 {
1396 num_counters = tmp.num_counters;
1397 name = tmp.name;
1398 }
1399
1400 if (len != size + num_counters * sizeof(struct xt_counters))
1401 return -EINVAL;
1402
1403 paddc = vmalloc_node(len - size, numa_node_id());
1404 if (!paddc)
1405 return -ENOMEM;
1406
1407 if (copy_from_user(paddc, user + size, len - size) != 0) {
1408 ret = -EFAULT;
1409 goto free;
1410 }
1411
1412 t = xt_find_table_lock(net, AF_INET6, name);
1413 if (!t || IS_ERR(t)) {
1414 ret = t ? PTR_ERR(t) : -ENOENT;
1415 goto free;
1416 }
1417
1418
1419 local_bh_disable();
1420 private = t->private;
1421 if (private->number != num_counters) {
1422 ret = -EINVAL;
1423 goto unlock_up_free;
1424 }
1425
1426 i = 0;
1427 /* Choose the copy that is on our node */
1428 curcpu = smp_processor_id();
1429 xt_info_wrlock(curcpu);
1430 loc_cpu_entry = private->entries[curcpu];
1431 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1432 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1433 ++i;
1434 }
1435 xt_info_wrunlock(curcpu);
1436
1437 unlock_up_free:
1438 local_bh_enable();
1439 xt_table_unlock(t);
1440 module_put(t->me);
1441 free:
1442 vfree(paddc);
1443
1444 return ret;
1445 }
1446
1447 #ifdef CONFIG_COMPAT
1448 struct compat_ip6t_replace {
1449 char name[IP6T_TABLE_MAXNAMELEN];
1450 u32 valid_hooks;
1451 u32 num_entries;
1452 u32 size;
1453 u32 hook_entry[NF_INET_NUMHOOKS];
1454 u32 underflow[NF_INET_NUMHOOKS];
1455 u32 num_counters;
1456 compat_uptr_t counters; /* struct ip6t_counters * */
1457 struct compat_ip6t_entry entries[0];
1458 };
1459
/*
 * Copy one kernel-format entry @e (plus its counters[i] snapshot) to the
 * 32-bit userland cursor *@dstptr, converting matches and target via the
 * compat helpers. *@dstptr and *@size are advanced/shrunk as the compat
 * form is smaller; target_offset/next_offset are patched afterwards to
 * account for the accumulated size difference.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Header copy is oversized on purpose: compat header is a prefix
	 * of the native one; counters are overwritten with the snapshot. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1498
1499 static int
1500 compat_find_calc_match(struct ip6t_entry_match *m,
1501 const char *name,
1502 const struct ip6t_ip6 *ipv6,
1503 unsigned int hookmask,
1504 int *size)
1505 {
1506 struct xt_match *match;
1507
1508 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1509 m->u.user.revision);
1510 if (IS_ERR(match)) {
1511 duprintf("compat_check_calc_match: `%s' not found\n",
1512 m->u.user.name);
1513 return PTR_ERR(match);
1514 }
1515 m->u.kernel.match = match;
1516 *size += xt_compat_match_offset(match);
1517 return 0;
1518 }
1519
1520 static void compat_release_entry(struct compat_ip6t_entry *e)
1521 {
1522 struct ip6t_entry_target *t;
1523 struct xt_entry_match *ematch;
1524
1525 /* Cleanup all matches */
1526 xt_ematch_foreach(ematch, e)
1527 module_put(ematch->u.kernel.match->me);
1528 t = compat_ip6t_get_target(e);
1529 module_put(t->u.kernel.target->me);
1530 }
1531
1532 static int
1533 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1534 struct xt_table_info *newinfo,
1535 unsigned int *size,
1536 const unsigned char *base,
1537 const unsigned char *limit,
1538 const unsigned int *hook_entries,
1539 const unsigned int *underflows,
1540 const char *name)
1541 {
1542 struct xt_entry_match *ematch;
1543 struct ip6t_entry_target *t;
1544 struct xt_target *target;
1545 unsigned int entry_offset;
1546 unsigned int j;
1547 int ret, off, h;
1548
1549 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1550 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1551 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1552 duprintf("Bad offset %p, limit = %p\n", e, limit);
1553 return -EINVAL;
1554 }
1555
1556 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1557 sizeof(struct compat_xt_entry_target)) {
1558 duprintf("checking: element %p size %u\n",
1559 e, e->next_offset);
1560 return -EINVAL;
1561 }
1562
1563 /* For purposes of check_entry casting the compat entry is fine */
1564 ret = check_entry((struct ip6t_entry *)e, name);
1565 if (ret)
1566 return ret;
1567
1568 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1569 entry_offset = (void *)e - (void *)base;
1570 j = 0;
1571 xt_ematch_foreach(ematch, e) {
1572 ret = compat_find_calc_match(ematch, name,
1573 &e->ipv6, e->comefrom, &off);
1574 if (ret != 0)
1575 goto release_matches;
1576 ++j;
1577 }
1578
1579 t = compat_ip6t_get_target(e);
1580 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1581 t->u.user.revision);
1582 if (IS_ERR(target)) {
1583 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1584 t->u.user.name);
1585 ret = PTR_ERR(target);
1586 goto release_matches;
1587 }
1588 t->u.kernel.target = target;
1589
1590 off += xt_compat_target_offset(target);
1591 *size += off;
1592 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1593 if (ret)
1594 goto out;
1595
1596 /* Check hooks & underflows */
1597 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1598 if ((unsigned char *)e - base == hook_entries[h])
1599 newinfo->hook_entry[h] = hook_entries[h];
1600 if ((unsigned char *)e - base == underflows[h])
1601 newinfo->underflow[h] = underflows[h];
1602 }
1603
1604 /* Clear counters and comefrom */
1605 memset(&e->counters, 0, sizeof(e->counters));
1606 e->comefrom = 0;
1607 return 0;
1608
1609 out:
1610 module_put(t->u.kernel.target->me);
1611 release_matches:
1612 xt_ematch_foreach(ematch, e) {
1613 if (j-- == 0)
1614 break;
1615 module_put(ematch->u.kernel.match->me);
1616 }
1617 return ret;
1618 }
1619
/*
 * Second-pass conversion: expand one validated compat entry @e into the
 * native-format cursor *@dstptr. Matches/target were already resolved in
 * pass one, so this only reshapes data. *@size grows by the native/compat
 * delta; hook entry/underflow offsets recorded in @newinfo are shifted
 * for every entry that precedes them.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth: offsets move outward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1660
/*
 * Run the ->checkentry hooks for all matches and the target of one
 * already-converted (native format) entry. On failure, the matches whose
 * check succeeded are cleaned up again; matches past the failure point
 * are left for the caller's release path.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;		/* number of matches successfully checked */
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1695
/*
 * Translate a 32-bit userland rule blob into native kernel format.
 *
 * Two passes under the AF_INET6 compat lock:
 *   1. check_compat_entry_size_and_hooks() validates each entry, resolves
 *      modules and records per-entry size deltas;
 *   2. compat_copy_entry_from_user() expands the blob into a freshly
 *      allocated native-sized xt_table_info.
 * Then (lock dropped) chains are verified for loops and each entry's
 * ->checkentry hooks run. On success *@pinfo/*@pentry0 are replaced with
 * the native table and the caller's compat buffers are freed; on failure
 * all module references and allocations are rolled back.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;		/* entries successfully size-checked (refs held) */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	/* size now includes the accumulated native/compat deltas. */
	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;		/* entries whose ->checkentry hooks succeeded */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release refs of the j not-yet-cleaned compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1849
1850 static int
1851 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1852 {
1853 int ret;
1854 struct compat_ip6t_replace tmp;
1855 struct xt_table_info *newinfo;
1856 void *loc_cpu_entry;
1857 struct ip6t_entry *iter;
1858
1859 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1860 return -EFAULT;
1861
1862 /* overflow check */
1863 if (tmp.size >= INT_MAX / num_possible_cpus())
1864 return -ENOMEM;
1865 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1866 return -ENOMEM;
1867
1868 newinfo = xt_alloc_table_info(tmp.size);
1869 if (!newinfo)
1870 return -ENOMEM;
1871
1872 /* choose the copy that is on our node/cpu */
1873 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1874 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1875 tmp.size) != 0) {
1876 ret = -EFAULT;
1877 goto free_newinfo;
1878 }
1879
1880 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1881 &newinfo, &loc_cpu_entry, tmp.size,
1882 tmp.num_entries, tmp.hook_entry,
1883 tmp.underflow);
1884 if (ret != 0)
1885 goto free_newinfo;
1886
1887 duprintf("compat_do_replace: Translated table\n");
1888
1889 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1890 tmp.num_counters, compat_ptr(tmp.counters));
1891 if (ret)
1892 goto free_newinfo_untrans;
1893 return 0;
1894
1895 free_newinfo_untrans:
1896 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1897 cleanup_entry(iter, net);
1898 free_newinfo:
1899 xt_free_table_info(newinfo);
1900 return ret;
1901 }
1902
1903 static int
1904 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1905 unsigned int len)
1906 {
1907 int ret;
1908
1909 if (!capable(CAP_NET_ADMIN))
1910 return -EPERM;
1911
1912 switch (cmd) {
1913 case IP6T_SO_SET_REPLACE:
1914 ret = compat_do_replace(sock_net(sk), user, len);
1915 break;
1916
1917 case IP6T_SO_SET_ADD_COUNTERS:
1918 ret = do_add_counters(sock_net(sk), user, len, 1);
1919 break;
1920
1921 default:
1922 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1923 ret = -EINVAL;
1924 }
1925
1926 return ret;
1927 }
1928
/* 32-bit userland layout of struct ip6t_get_entries (SO_GET_ENTRIES). */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;		/* expected compat blob size */
	struct compat_ip6t_entry entrytable[0];	/* filled by the kernel */
};
1934
/*
 * Dump the whole table to a 32-bit userland buffer: snapshot counters,
 * then convert and copy each entry via compat_copy_entry_to_user().
 * Caller holds the table and the AF_INET6 compat lock.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1969
/*
 * Compat IP6T_SO_GET_ENTRIES handler: validate the requested size against
 * a freshly computed compat view of the table, then dump the converted
 * entries. The whole operation runs under the AF_INET6 compat lock since
 * per-entry compat offsets are registered and flushed within it.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size mismatch: table changed; let user retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
2016
2017 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2018
2019 static int
2020 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2021 {
2022 int ret;
2023
2024 if (!capable(CAP_NET_ADMIN))
2025 return -EPERM;
2026
2027 switch (cmd) {
2028 case IP6T_SO_GET_INFO:
2029 ret = get_info(sock_net(sk), user, len, 1);
2030 break;
2031 case IP6T_SO_GET_ENTRIES:
2032 ret = compat_get_entries(sock_net(sk), user, len);
2033 break;
2034 default:
2035 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2036 }
2037 return ret;
2038 }
2039 #endif
2040
2041 static int
2042 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2043 {
2044 int ret;
2045
2046 if (!capable(CAP_NET_ADMIN))
2047 return -EPERM;
2048
2049 switch (cmd) {
2050 case IP6T_SO_SET_REPLACE:
2051 ret = do_replace(sock_net(sk), user, len);
2052 break;
2053
2054 case IP6T_SO_SET_ADD_COUNTERS:
2055 ret = do_add_counters(sock_net(sk), user, len, 0);
2056 break;
2057
2058 default:
2059 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2060 ret = -EINVAL;
2061 }
2062
2063 return ret;
2064 }
2065
/*
 * Native getsockopt dispatcher: table info, entry dump, and match/target
 * revision queries (the latter autoload "ip6t_<name>" modules on demand,
 * with the result delivered through &ret by xt_find_revision()).
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;	/* 1 = target lookup, 0 = match lookup */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() stores its answer in ret. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2116
/*
 * Register an ip6tables table in @net from the template rules in @repl:
 * allocate a table blob, translate/verify the rules, and hand the result
 * to the xtables core.
 * Returns the registered table or an ERR_PTR(); @newinfo's ownership
 * passes to the core only on successful registration.
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Dummy "old" info so xt_register_table has something to swap out. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2154
/*
 * Tear down a registered table: unhook it from the xtables core, run each
 * entry's cleanup (dropping match/target module refs), drop the extra
 * table-module reference held for user-defined rules, and free the blob.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	/* table->me must be sampled before cleanup frees rule state. */
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* Extra ref was taken when user rules were loaded; see __do_replace. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2172
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = type == test_type &&
			code >= min_code &&
			code <= max_code;

	/* XOR with invert: "x != invert" on bools is exactly "x ^ invert". */
	return in_range != invert;
}
2182
/*
 * Match function for the built-in "icmp6" match: check the ICMPv6 type
 * and code range from the rule against the packet's ICMPv6 header.
 * Non-first fragments never match; a truncated header sets hotdrop so the
 * packet is dropped rather than mis-classified.
 */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2210
/* Called when user tries to insert an entry of this type. */
static int icmp6_checkentry(const struct xt_mtchk_param *par)
{
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	/* NOTE(review): returns truthy-int (1 = ok, 0 = reject), matching
	 * the transitional checkentry API of this tree — confirm against
	 * xt_check_match() before changing to 0/-errno semantics. */
	return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
}
2219
/* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is just the verdict int; no
 * ->target function — verdicts are handled inline in the traverser. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

/* ERROR target: terminates a user-defined chain / flags table errors. */
static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV6,
};

/* get/setsockopt hooks through which ip6tables(8) talks to this module. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

/* Built-in "icmp6" match (type/code range matching). */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= NFPROTO_IPV6,
};
2264
/* Per-network-namespace setup: register the IPv6 xtables state. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}

/* Per-network-namespace teardown counterpart. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}

static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2279
/*
 * Module init: register per-netns state, the built-in targets and match,
 * and the sockopt interface. Failures unwind in reverse order via the
 * goto ladder below.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else can be using these registrations yet, so this
	 * sequence cannot race or sleep on contention. */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2318
/*
 * Module exit: tear everything down in the exact reverse order of
 * ip6_tables_init() — sockopts first so userspace can no longer
 * reach the tables, pernet subsystem last.
 */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2329
/*
 * Find the offset to the specified header, or the protocol number of the
 * last header if target < 0.  "Last header" is the transport protocol
 * header, ESP, or "No next header".
 *
 * If the target header is found, its offset is stored in *offset and its
 * protocol number is returned.  Otherwise a negative errno is returned
 * (-ENOENT: not present; -EBADMSG: header data not available in skb).
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that a non-first fragment is a special case: "the protocol number
 * of the last header" is the "next header" field of its Fragment header.
 * In that case *offset is meaningless, and the fragment offset is stored
 * in *fragoff if fragoff isn't NULL.
 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	/* Walk starts just past the fixed IPv6 header. */
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	/* NOTE(review): len is decremented below but never consulted —
	 * it looks like a leftover; no bounds check depends on it. */
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	/* Walk the extension-header chain until the target (or, for
	 * target < 0, the first non-extension header) is reached. */
	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			/* Reached a transport header / NEXTHDR_NONE: success
			 * for the "find last header" mode, else not found. */
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* Mask off the low 3 bits (reserved + M flag);
			 * non-zero result means a non-first fragment. */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* Non-first fragment: only the "last header"
				 * query can be answered, and only if the
				 * Fragment header's next header is already
				 * a non-extension header. */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			/* First fragment: Fragment header is fixed 8 bytes. */
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			/* AH encodes its length in 4-octet units. */
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			/* Other extension headers: 8-octet units. */
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2406
/* Symbols used by the per-table modules (ip6table_filter and friends)
 * and by other IPv6 netfilter code. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);