/* net/ipv6/netfilter/ip6_tables.c */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65
/* Allocate a blank bootstrap table (one ERROR rule per hook); the real
 * ruleset replaces it later via setsockopt. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71
72 /*
73 We keep a set of rules for each CPU, so we can avoid write-locking
74 them in the softirq when updating the counters and therefore
75 only need to read-lock in the softirq; doing a write_lock_bh() in user
76 context stops packets coming through and allows user context to read
77 the counters or update the rules.
78
79 Hence the start of any table is given by get_table() below. */
80
81 /* Check for an extension */
82 int
83 ip6t_ext_hdr(u8 nexthdr)
84 {
85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS);
92 }
93
/* Returns whether the packet matches the rule's address, interface and
 * protocol specification.  Performance critical - called for every packet.
 *
 * @protoff: out - offset of the upper-layer header that was found
 * @fragoff: out - fragment offset (non-zero for non-first fragments)
 * @hotdrop: out - set true when the packet must be dropped outright
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* Apply the rule's invert flag to a raw match result. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address match under netmask, possibly inverted. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface name match (mask supports "+" wildcards). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Outgoing interface name match. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Header walk failed.  Only hotdrop when this is not
			 * a later fragment: later fragments legitimately lack
			 * the upper-layer header. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
176
177 /* should be ip6 safe */
178 static bool
179 ip6_checkentry(const struct ip6t_ip6 *ipv6)
180 {
181 if (ipv6->flags & ~IP6T_F_MASK) {
182 duprintf("Unknown flag bits set: %08X\n",
183 ipv6->flags & ~IP6T_F_MASK);
184 return false;
185 }
186 if (ipv6->invflags & ~IP6T_INV_MASK) {
187 duprintf("Unknown invflag bits set: %08X\n",
188 ipv6->invflags & ~IP6T_INV_MASK);
189 return false;
190 }
191 return true;
192 }
193
/* Target of the built-in ERROR rule: should never be reached at runtime;
 * log (ratelimited) the embedded error string and drop the packet. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
202
/* Return the entry located @offset bytes into a rule blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
208
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* Static all-zero template to compare against. */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
217
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
223
224 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
225 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Hook number -> built-in chain name, for TRACE log output. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING] = "PREROUTING",
	[NF_INET_LOCAL_IN] = "INPUT",
	[NF_INET_FORWARD] = "FORWARD",
	[NF_INET_LOCAL_OUT] = "OUTPUT",
	[NF_INET_POST_ROUTING] = "POSTROUTING",
};

/* What kind of rule produced a trace line. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE] = "rule",
	[NF_IP6_TRACE_COMMENT_RETURN] = "return",
	[NF_IP6_TRACE_COMMENT_POLICY] = "policy",
};

/* Log parameters for TRACE output: level 4, all log flags enabled. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
256
/* Mildly perf critical (only if packet tracing is on) */
/* Walk callback for trace_packet(): invoked for each entry @s until the
 * traced entry @e is reached.  Tracks the current user chain (an ERROR
 * target heads each user chain and carries its name) and the rule number
 * inside it.  Returns 1 once @e has been found, 0 to keep walking. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
288
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a packet that
 * matched entry @e while skb->nf_trace was set. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* Walk this cpu's copy from the hook entry point until @e is found,
	 * collecting chain name and rule number along the way. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
318 #endif
319
/* Advance to the entry immediately following @entry in the blob. */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
325
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Core packet-filter loop: walks this cpu's copy of the ruleset from the
 * hook's entry point, evaluating matches and executing targets until a
 * verdict is reached.  Jumps to user chains are tracked on a per-cpu
 * jump stack so RETURN can pop back to the calling chain. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Read lock keeps the per-cpu table copy stable while we walk it. */
	xt_info_rdlock_bh();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* Run every match extension; any miss skips this rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict: -v-1 decodes NF_*. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				/* RETURN: pop the jump stack, or fall back
				 * to the hook's underflow (chain policy)
				 * when the stack is empty. */
				if (*stackptr == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Real jump (not goto, not fallthrough):
				 * remember where to return to. */
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extension target: let it decide the verdict. */
		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_info_rdunlock_bh();
	*stackptr = origptr;

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
455
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom.

   Depth-first walk over every chain reachable from each valid hook,
   done without recursion: the back pointer of the walk is stashed in
   e->counters.pcnt (reset to 0 on the way back out) and the set of
   source hooks accumulates in e->comefrom.  Bit NF_INET_NUMHOOKS in
   comefrom marks "currently on the walk path" and is what detects a
   loop. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Entry is already on the current walk path: loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leaving this entry: clear the
					 * "on walk path" bit. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* NOTE(review): int compared against
					 * unsigned size arithmetic - relies on
					 * implicit conversion; later kernels
					 * hardened this check.  Verify. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
567
568 static void cleanup_match(struct xt_entry_match *m, struct net *net)
569 {
570 struct xt_mtdtor_param par;
571
572 par.net = net;
573 par.match = m->u.kernel.match;
574 par.matchinfo = m->data;
575 par.family = NFPROTO_IPV6;
576 if (par.match->destroy != NULL)
577 par.match->destroy(&par);
578 module_put(par.match->me);
579 }
580
/* Basic sanity checks needing no extension modules: the ip6 part must be
 * valid and the target record must fit entirely inside the entry. */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* Room for at least the target header before next_offset. */
	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	/* The full (user-declared) target size must fit as well. */
	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
601
602 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
603 {
604 const struct ip6t_ip6 *ipv6 = par->entryinfo;
605 int ret;
606
607 par->match = m->u.kernel.match;
608 par->matchinfo = m->data;
609
610 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
611 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
612 if (ret < 0) {
613 duprintf("ip_tables: check failed for `%s'.\n",
614 par.match->name);
615 return ret;
616 }
617 return 0;
618 }
619
620 static int
621 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
622 {
623 struct xt_match *match;
624 int ret;
625
626 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
627 m->u.user.revision);
628 if (IS_ERR(match)) {
629 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
630 return PTR_ERR(match);
631 }
632 m->u.kernel.match = match;
633
634 ret = check_match(m, par);
635 if (ret)
636 goto err;
637
638 return 0;
639 err:
640 module_put(m->u.kernel.match->me);
641 return ret;
642 }
643
644 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
645 {
646 struct xt_entry_target *t = ip6t_get_target(e);
647 struct xt_tgchk_param par = {
648 .net = net,
649 .table = name,
650 .entryinfo = e,
651 .target = t->u.kernel.target,
652 .targinfo = t->data,
653 .hook_mask = e->comefrom,
654 .family = NFPROTO_IPV6,
655 };
656 int ret;
657
658 t = ip6t_get_target(e);
659 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
660 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
661 if (ret < 0) {
662 duprintf("ip_tables: check failed for `%s'.\n",
663 t->u.kernel.target->name);
664 return ret;
665 }
666 return 0;
667 }
668
/* Resolve and check all match extensions and the target of one entry.
 * On failure, every match that was already set up (and the target
 * reference, if taken) is released again.  Returns 0 or -errno. */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts matches successfully set up, for partial cleanup. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Unwind only the j matches that succeeded. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
721
722 static bool check_underflow(const struct ip6t_entry *e)
723 {
724 const struct xt_entry_target *t;
725 unsigned int verdict;
726
727 if (!unconditional(&e->ipv6))
728 return false;
729 t = ip6t_get_target_c(e);
730 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
731 return false;
732 verdict = ((struct xt_standard_target *)t)->verdict;
733 verdict = -verdict - 1;
734 return verdict == NF_DROP || verdict == NF_ACCEPT;
735 }
736
/* Validate one entry's alignment, placement and minimum size within the
 * user-supplied blob, and record any hook entry/underflow offsets that
 * land exactly on it.  Runs before any extension lookups; also zeroes
 * the entry's counters and comefrom. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	/* Must be aligned and leave room for the fixed header. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must cover at least header + minimal target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
783
784 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
785 {
786 struct xt_tgdtor_param par;
787 struct xt_entry_target *t;
788 struct xt_entry_match *ematch;
789
790 /* Cleanup all matches */
791 xt_ematch_foreach(ematch, e)
792 cleanup_match(ematch, net);
793 t = ip6t_get_target(e);
794
795 par.net = net;
796 par.target = t->u.kernel.target;
797 par.targinfo = t->data;
798 par.family = NFPROTO_IPV6;
799 if (par.target->destroy != NULL)
800 par.target->destroy(&par);
801 module_put(par.target->me);
802 }
803
/* Checks and translates the user-supplied table segment (held in
   newinfo).  On success every entry has been validated, all extension
   references are held, and each per-cpu copy holds the ruleset. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain starts with an ERROR target; count them
		 * to size the per-cpu jump stack used by ip6t_do_table(). */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Loop detection / source-hook marking must succeed. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that passed find_check_entry(). */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
892
/* Fill counters[] (t->number slots) with a snapshot of every rule's
 * byte/packet counters summed across all possible CPUs. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu = get_cpu();

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}
	local_bh_enable();
	/* Processing counters from other cpus, we can let bottom half enabled,
	 * (preemption is disabled)
	 */

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		local_bh_disable();
		/* Write lock keeps that cpu's ip6t_do_table from updating
		 * its counters while we read them. */
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
		local_bh_enable();
	}
	put_cpu();
}
937
938 static struct xt_counters *alloc_counters(const struct xt_table *table)
939 {
940 unsigned int countersize;
941 struct xt_counters *counters;
942 const struct xt_table_info *private = table->private;
943
944 /* We need atomic snapshot of counters: rest doesn't change
945 (other than comefrom, which userspace doesn't care
946 about). */
947 countersize = sizeof(struct xt_counters) * private->number;
948 counters = vmalloc(countersize);
949
950 if (counters == NULL)
951 return ERR_PTR(-ENOMEM);
952
953 get_counters(private, counters);
954
955 return counters;
956 }
957
/* Copy the ruleset blob to userspace, then patch in the live counter
 * snapshot and the user-visible match/target names (overwriting the
 * kernel-internal pointers that were copied verbatim). */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied counters with the locked snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1031
1032 #ifdef CONFIG_COMPAT
/* Translate a 32-bit standard-target verdict to native layout: positive
 * values are jump offsets and must grow by the accumulated compat delta. */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
}
1041
/* Reverse of compat_standard_from_user(): shrink a native jump offset
 * back to its compat value before copying it to userspace. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
1050
/* Account for the native-vs-compat size delta of one entry: register the
 * per-entry offset with the compat layer, shrink newinfo->size, and pull
 * back every hook/underflow offset that lies after this entry. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* off = how much smaller this entry is in the compat layout. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1081
/* Build an xt_table_info describing the table as seen by 32-bit
 * userspace (size and hook offsets adjusted by the compat deltas). */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1103 #endif
1104
1105 static int get_info(struct net *net, void __user *user,
1106 const int *len, int compat)
1107 {
1108 char name[XT_TABLE_MAXNAMELEN];
1109 struct xt_table *t;
1110 int ret;
1111
1112 if (*len != sizeof(struct ip6t_getinfo)) {
1113 duprintf("length %u != %zu\n", *len,
1114 sizeof(struct ip6t_getinfo));
1115 return -EINVAL;
1116 }
1117
1118 if (copy_from_user(name, user, sizeof(name)) != 0)
1119 return -EFAULT;
1120
1121 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1122 #ifdef CONFIG_COMPAT
1123 if (compat)
1124 xt_compat_lock(AF_INET6);
1125 #endif
1126 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1127 "ip6table_%s", name);
1128 if (t && !IS_ERR(t)) {
1129 struct ip6t_getinfo info;
1130 const struct xt_table_info *private = t->private;
1131 #ifdef CONFIG_COMPAT
1132 struct xt_table_info tmp;
1133
1134 if (compat) {
1135 ret = compat_table_info(private, &tmp);
1136 xt_compat_flush_offsets(AF_INET6);
1137 private = &tmp;
1138 }
1139 #endif
1140 info.valid_hooks = t->valid_hooks;
1141 memcpy(info.hook_entry, private->hook_entry,
1142 sizeof(info.hook_entry));
1143 memcpy(info.underflow, private->underflow,
1144 sizeof(info.underflow));
1145 info.num_entries = private->number;
1146 info.size = private->size;
1147 strcpy(info.name, name);
1148
1149 if (copy_to_user(user, &info, *len) != 0)
1150 ret = -EFAULT;
1151 else
1152 ret = 0;
1153
1154 xt_table_unlock(t);
1155 module_put(t->me);
1156 } else
1157 ret = t ? PTR_ERR(t) : -ENOENT;
1158 #ifdef CONFIG_COMPAT
1159 if (compat)
1160 xt_compat_unlock(AF_INET6);
1161 #endif
1162 return ret;
1163 }
1164
1165 static int
1166 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1167 const int *len)
1168 {
1169 int ret;
1170 struct ip6t_get_entries get;
1171 struct xt_table *t;
1172
1173 if (*len < sizeof(get)) {
1174 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1175 return -EINVAL;
1176 }
1177 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1178 return -EFAULT;
1179 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1180 duprintf("get_entries: %u != %zu\n",
1181 *len, sizeof(get) + get.size);
1182 return -EINVAL;
1183 }
1184
1185 t = xt_find_table_lock(net, AF_INET6, get.name);
1186 if (t && !IS_ERR(t)) {
1187 struct xt_table_info *private = t->private;
1188 duprintf("t->private->number = %u\n", private->number);
1189 if (get.size == private->size)
1190 ret = copy_entries_to_user(private->size,
1191 t, uptr->entrytable);
1192 else {
1193 duprintf("get_entries: I've got %u not %u!\n",
1194 private->size, get.size);
1195 ret = -EAGAIN;
1196 }
1197 module_put(t->me);
1198 xt_table_unlock(t);
1199 } else
1200 ret = t ? PTR_ERR(t) : -ENOENT;
1201
1202 return ret;
1203 }
1204
/*
 * Common tail of the native and compat REPLACE paths.  Swaps @newinfo in
 * as the ruleset of table @name, copies the displaced table's counters
 * to @counters_ptr and frees the old table.  On success ownership of
 * @newinfo passes to the xtables core; on failure the caller must free
 * it (and undo its translation).
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Two independent puts: one drops the reference taken by the
	 * lookup above, the second drops the extra reference held while
	 * the table contained user-added rules.  Both conditions can be
	 * true at once, in which case the module is put twice. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1277
1278 static int
1279 do_replace(struct net *net, const void __user *user, unsigned int len)
1280 {
1281 int ret;
1282 struct ip6t_replace tmp;
1283 struct xt_table_info *newinfo;
1284 void *loc_cpu_entry;
1285 struct ip6t_entry *iter;
1286
1287 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1288 return -EFAULT;
1289
1290 /* overflow check */
1291 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1292 return -ENOMEM;
1293
1294 newinfo = xt_alloc_table_info(tmp.size);
1295 if (!newinfo)
1296 return -ENOMEM;
1297
1298 /* choose the copy that is on our node/cpu */
1299 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1300 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1301 tmp.size) != 0) {
1302 ret = -EFAULT;
1303 goto free_newinfo;
1304 }
1305
1306 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1307 if (ret != 0)
1308 goto free_newinfo;
1309
1310 duprintf("ip_tables: Translated table\n");
1311
1312 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1313 tmp.num_counters, tmp.counters);
1314 if (ret)
1315 goto free_newinfo_untrans;
1316 return 0;
1317
1318 free_newinfo_untrans:
1319 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1320 cleanup_entry(iter, net);
1321 free_newinfo:
1322 xt_free_table_info(newinfo);
1323 return ret;
1324 }
1325
1326 static int
1327 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1328 int compat)
1329 {
1330 unsigned int i, curcpu;
1331 struct xt_counters_info tmp;
1332 struct xt_counters *paddc;
1333 unsigned int num_counters;
1334 char *name;
1335 int size;
1336 void *ptmp;
1337 struct xt_table *t;
1338 const struct xt_table_info *private;
1339 int ret = 0;
1340 const void *loc_cpu_entry;
1341 struct ip6t_entry *iter;
1342 #ifdef CONFIG_COMPAT
1343 struct compat_xt_counters_info compat_tmp;
1344
1345 if (compat) {
1346 ptmp = &compat_tmp;
1347 size = sizeof(struct compat_xt_counters_info);
1348 } else
1349 #endif
1350 {
1351 ptmp = &tmp;
1352 size = sizeof(struct xt_counters_info);
1353 }
1354
1355 if (copy_from_user(ptmp, user, size) != 0)
1356 return -EFAULT;
1357
1358 #ifdef CONFIG_COMPAT
1359 if (compat) {
1360 num_counters = compat_tmp.num_counters;
1361 name = compat_tmp.name;
1362 } else
1363 #endif
1364 {
1365 num_counters = tmp.num_counters;
1366 name = tmp.name;
1367 }
1368
1369 if (len != size + num_counters * sizeof(struct xt_counters))
1370 return -EINVAL;
1371
1372 paddc = vmalloc(len - size);
1373 if (!paddc)
1374 return -ENOMEM;
1375
1376 if (copy_from_user(paddc, user + size, len - size) != 0) {
1377 ret = -EFAULT;
1378 goto free;
1379 }
1380
1381 t = xt_find_table_lock(net, AF_INET6, name);
1382 if (!t || IS_ERR(t)) {
1383 ret = t ? PTR_ERR(t) : -ENOENT;
1384 goto free;
1385 }
1386
1387
1388 local_bh_disable();
1389 private = t->private;
1390 if (private->number != num_counters) {
1391 ret = -EINVAL;
1392 goto unlock_up_free;
1393 }
1394
1395 i = 0;
1396 /* Choose the copy that is on our node */
1397 curcpu = smp_processor_id();
1398 xt_info_wrlock(curcpu);
1399 loc_cpu_entry = private->entries[curcpu];
1400 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1401 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1402 ++i;
1403 }
1404 xt_info_wrunlock(curcpu);
1405
1406 unlock_up_free:
1407 local_bh_enable();
1408 xt_table_unlock(t);
1409 module_put(t->me);
1410 free:
1411 vfree(paddc);
1412
1413 return ret;
1414 }
1415
1416 #ifdef CONFIG_COMPAT
/*
 * 32-bit layout of struct ip6t_replace as seen from a compat process;
 * only the counters pointer and the trailing entries differ in size.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1428
/*
 * Convert one kernel rule to compat layout at *dstptr, folding in the
 * @i-th counter pair.  *dstptr and *size are advanced/shrunk as the
 * matches and target are converted, and the entry's target_offset /
 * next_offset are rewritten to reflect the (smaller) compat sizes.
 * Returns 0 or -EFAULT / a conversion error.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; subtract it
	 * to express the offsets in compat coordinates */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1467
1468 static int
1469 compat_find_calc_match(struct xt_entry_match *m,
1470 const char *name,
1471 const struct ip6t_ip6 *ipv6,
1472 unsigned int hookmask,
1473 int *size)
1474 {
1475 struct xt_match *match;
1476
1477 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1478 m->u.user.revision);
1479 if (IS_ERR(match)) {
1480 duprintf("compat_check_calc_match: `%s' not found\n",
1481 m->u.user.name);
1482 return PTR_ERR(match);
1483 }
1484 m->u.kernel.match = match;
1485 *size += xt_compat_match_offset(match);
1486 return 0;
1487 }
1488
1489 static void compat_release_entry(struct compat_ip6t_entry *e)
1490 {
1491 struct xt_entry_target *t;
1492 struct xt_entry_match *ematch;
1493
1494 /* Cleanup all matches */
1495 xt_ematch_foreach(ematch, e)
1496 module_put(ematch->u.kernel.match->me);
1497 t = compat_ip6t_get_target(e);
1498 module_put(t->u.kernel.target->me);
1499 }
1500
/*
 * First-pass validation of one compat entry: check alignment and
 * bounds, resolve its matches and target (taking module references),
 * record the native-vs-compat size delta with xt_compat_add_offset(),
 * and note whether this entry sits at a hook entry/underflow position.
 * On failure all references taken so far are dropped again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* entry must at least hold its header plus a target header */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much larger the native entry will be */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	/* j counts matches whose module refs must be dropped on error */
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* remember the per-entry delta for the second translation pass */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1588
/*
 * Second-pass translation: expand one already-validated compat entry
 * into native layout at *dstptr, converting each match and the target,
 * fixing up target_offset/next_offset, and shifting any hook entry or
 * underflow offsets that lie beyond this entry by the growth amount.
 * *size grows by the native-vs-compat delta as we go.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth; offsets move outward */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1629
/*
 * Run the ->checkentry hooks of every match and of the target for one
 * translated entry.  On failure, undo the checks already performed
 * (the first j matches) via cleanup_match().
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* j counts matches successfully checked so far */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1664
/*
 * Translate a full compat-layout ruleset (*pentry0, described by *pinfo)
 * into native layout.  Two passes under the compat lock: first validate
 * every entry and record per-entry size deltas, then allocate a native
 * table and expand each entry into it.  Afterwards the chain graph is
 * verified and each entry's ->checkentry hooks run.  On success *pinfo
 * and *pentry0 are replaced by the new native table and the old one is
 * freed; on failure all module references taken are dropped again.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	/* j counts entries validated so far (for error unwind) */
	j = 0;
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	/* second pass: expand into a freshly allocated native table */
	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* i counts entries whose ->checkentry has run (for unwind) */
	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* release module refs held by the first j compat entries */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1821
1822 static int
1823 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1824 {
1825 int ret;
1826 struct compat_ip6t_replace tmp;
1827 struct xt_table_info *newinfo;
1828 void *loc_cpu_entry;
1829 struct ip6t_entry *iter;
1830
1831 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1832 return -EFAULT;
1833
1834 /* overflow check */
1835 if (tmp.size >= INT_MAX / num_possible_cpus())
1836 return -ENOMEM;
1837 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1838 return -ENOMEM;
1839
1840 newinfo = xt_alloc_table_info(tmp.size);
1841 if (!newinfo)
1842 return -ENOMEM;
1843
1844 /* choose the copy that is on our node/cpu */
1845 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1846 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1847 tmp.size) != 0) {
1848 ret = -EFAULT;
1849 goto free_newinfo;
1850 }
1851
1852 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1853 &newinfo, &loc_cpu_entry, tmp.size,
1854 tmp.num_entries, tmp.hook_entry,
1855 tmp.underflow);
1856 if (ret != 0)
1857 goto free_newinfo;
1858
1859 duprintf("compat_do_replace: Translated table\n");
1860
1861 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1862 tmp.num_counters, compat_ptr(tmp.counters));
1863 if (ret)
1864 goto free_newinfo_untrans;
1865 return 0;
1866
1867 free_newinfo_untrans:
1868 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1869 cleanup_entry(iter, net);
1870 free_newinfo:
1871 xt_free_table_info(newinfo);
1872 return ret;
1873 }
1874
1875 static int
1876 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1877 unsigned int len)
1878 {
1879 int ret;
1880
1881 if (!capable(CAP_NET_ADMIN))
1882 return -EPERM;
1883
1884 switch (cmd) {
1885 case IP6T_SO_SET_REPLACE:
1886 ret = compat_do_replace(sock_net(sk), user, len);
1887 break;
1888
1889 case IP6T_SO_SET_ADD_COUNTERS:
1890 ret = do_add_counters(sock_net(sk), user, len, 1);
1891 break;
1892
1893 default:
1894 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1895 ret = -EINVAL;
1896 }
1897
1898 return ret;
1899 }
1900
/* 32-bit layout of struct ip6t_get_entries for the compat GET path. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1906
/*
 * Copy the whole ruleset of @table to a 32-bit caller at @userptr,
 * converting every entry to compat layout and attaching a snapshot of
 * the counters.  Returns 0 or a negative errno.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1941
1942 static int
1943 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1944 int *len)
1945 {
1946 int ret;
1947 struct compat_ip6t_get_entries get;
1948 struct xt_table *t;
1949
1950 if (*len < sizeof(get)) {
1951 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1952 return -EINVAL;
1953 }
1954
1955 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1956 return -EFAULT;
1957
1958 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1959 duprintf("compat_get_entries: %u != %zu\n",
1960 *len, sizeof(get) + get.size);
1961 return -EINVAL;
1962 }
1963
1964 xt_compat_lock(AF_INET6);
1965 t = xt_find_table_lock(net, AF_INET6, get.name);
1966 if (t && !IS_ERR(t)) {
1967 const struct xt_table_info *private = t->private;
1968 struct xt_table_info info;
1969 duprintf("t->private->number = %u\n", private->number);
1970 ret = compat_table_info(private, &info);
1971 if (!ret && get.size == info.size) {
1972 ret = compat_copy_entries_to_user(private->size,
1973 t, uptr->entrytable);
1974 } else if (!ret) {
1975 duprintf("compat_get_entries: I've got %u not %u!\n",
1976 private->size, get.size);
1977 ret = -EAGAIN;
1978 }
1979 xt_compat_flush_offsets(AF_INET6);
1980 module_put(t->me);
1981 xt_table_unlock(t);
1982 } else
1983 ret = t ? PTR_ERR(t) : -ENOENT;
1984
1985 xt_compat_unlock(AF_INET6);
1986 return ret;
1987 }
1988
1989 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1990
1991 static int
1992 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1993 {
1994 int ret;
1995
1996 if (!capable(CAP_NET_ADMIN))
1997 return -EPERM;
1998
1999 switch (cmd) {
2000 case IP6T_SO_GET_INFO:
2001 ret = get_info(sock_net(sk), user, len, 1);
2002 break;
2003 case IP6T_SO_GET_ENTRIES:
2004 ret = compat_get_entries(sock_net(sk), user, len);
2005 break;
2006 default:
2007 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2008 }
2009 return ret;
2010 }
2011 #endif
2012
2013 static int
2014 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2015 {
2016 int ret;
2017
2018 if (!capable(CAP_NET_ADMIN))
2019 return -EPERM;
2020
2021 switch (cmd) {
2022 case IP6T_SO_SET_REPLACE:
2023 ret = do_replace(sock_net(sk), user, len);
2024 break;
2025
2026 case IP6T_SO_SET_ADD_COUNTERS:
2027 ret = do_add_counters(sock_net(sk), user, len, 0);
2028 break;
2029
2030 default:
2031 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2032 ret = -EINVAL;
2033 }
2034
2035 return ret;
2036 }
2037
2038 static int
2039 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2040 {
2041 int ret;
2042
2043 if (!capable(CAP_NET_ADMIN))
2044 return -EPERM;
2045
2046 switch (cmd) {
2047 case IP6T_SO_GET_INFO:
2048 ret = get_info(sock_net(sk), user, len, 0);
2049 break;
2050
2051 case IP6T_SO_GET_ENTRIES:
2052 ret = get_entries(sock_net(sk), user, len);
2053 break;
2054
2055 case IP6T_SO_GET_REVISION_MATCH:
2056 case IP6T_SO_GET_REVISION_TARGET: {
2057 struct xt_get_revision rev;
2058 int target;
2059
2060 if (*len != sizeof(rev)) {
2061 ret = -EINVAL;
2062 break;
2063 }
2064 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2065 ret = -EFAULT;
2066 break;
2067 }
2068
2069 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2070 target = 1;
2071 else
2072 target = 0;
2073
2074 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2075 rev.revision,
2076 target, &ret),
2077 "ip6t_%s", rev.name);
2078 break;
2079 }
2080
2081 default:
2082 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2083 ret = -EINVAL;
2084 }
2085
2086 return ret;
2087 }
2088
/*
 * Register a new ip6tables table for @net from the built-in replacement
 * blueprint @repl: allocate a table_info, translate the initial rules
 * and hand the result to the xtables core.
 * Returns the registered table or an ERR_PTR().
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* bootstrap is a throwaway "previous" table for xt_register_table */
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2125
/*
 * Tear down a registered table: unhook it from the xtables core, run
 * cleanup_entry() on every rule (dropping match/target module refs) and
 * free the table_info.  An extra module reference is dropped if the
 * table still held user-added rules.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	/* table->me may be unreachable after xt_unregister_table */
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2143
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code && code <= max_code;

	return hit ^ invert;
}
2153
/*
 * Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the configured range (optionally inverted).
 * Non-first fragments never match; a packet too short to carry an
 * ICMPv6 header is hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2181
2182 /* Called when user tries to insert an entry of this type. */
2183 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2184 {
2185 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2186
2187 /* Must specify no unknown invflags */
2188 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2189 }
2190
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* verdict targets (ACCEPT/DROP/jumps); no ->target fn,
		 * handled inline by ip6t_do_table */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* placed by iptables-save/restore to carry chain names */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2210
/* get/setsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the handlers above (plus compat variants). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2227
/* The one built-in match: ICMPv6 type/code matching ("icmp6"). */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2238
/* Per-netns init: set up the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2243
/* Per-netns teardown: release the IPv6 xtables state of @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2248
/* Hooks the init/exit pair above into network-namespace lifetime. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2253
/*
 * Module init: register pernet state, the built-in targets and match,
 * and the sockopt interface, unwinding in reverse order on failure.
 * (The error labels keep their historical, non-contiguous numbering.)
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2287
/* Module exit: unregister everything in reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2296
2297 /*
2298 * find the offset to specified header or the protocol number of last header
2299 * if target < 0. "last header" is transport protocol header, ESP, or
2300 * "No next header".
2301 *
2302 * If target header is found, its offset is set in *offset and return protocol
2303 * number. Otherwise, return -1.
2304 *
2305 * If the first fragment doesn't contain the final protocol header or
2306 * NEXTHDR_NONE it is considered invalid.
2307 *
2308 * Note that non-1st fragment is special case that "the protocol number
2309 * of last header" is "next header" field in Fragment header. In this case,
2310 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2311 * isn't NULL.
2312 *
2313 */
2314 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2315 int target, unsigned short *fragoff)
2316 {
2317 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2318 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2319 unsigned int len = skb->len - start;
2320
2321 if (fragoff)
2322 *fragoff = 0;
2323
2324 while (nexthdr != target) {
2325 struct ipv6_opt_hdr _hdr, *hp;
2326 unsigned int hdrlen;
2327
2328 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2329 if (target < 0)
2330 break;
2331 return -ENOENT;
2332 }
2333
2334 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2335 if (hp == NULL)
2336 return -EBADMSG;
2337 if (nexthdr == NEXTHDR_FRAGMENT) {
2338 unsigned short _frag_off;
2339 __be16 *fp;
2340 fp = skb_header_pointer(skb,
2341 start+offsetof(struct frag_hdr,
2342 frag_off),
2343 sizeof(_frag_off),
2344 &_frag_off);
2345 if (fp == NULL)
2346 return -EBADMSG;
2347
2348 _frag_off = ntohs(*fp) & ~0x7;
2349 if (_frag_off) {
2350 if (target < 0 &&
2351 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2352 hp->nexthdr == NEXTHDR_NONE)) {
2353 if (fragoff)
2354 *fragoff = _frag_off;
2355 return hp->nexthdr;
2356 }
2357 return -ENOENT;
2358 }
2359 hdrlen = 8;
2360 } else if (nexthdr == NEXTHDR_AUTH)
2361 hdrlen = (hp->hdrlen + 2) << 2;
2362 else
2363 hdrlen = ipv6_optlen(hp);
2364
2365 nexthdr = hp->nexthdr;
2366 len -= hdrlen;
2367 start += hdrlen;
2368 }
2369
2370 *offset = start;
2371 return nexthdr;
2372 }
2373
/* Symbols used by per-table modules (ip6table_filter etc.) and other
 * IPv6 netfilter code. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);