/*
 * net/ipv6/netfilter/ip6_tables.c
 * (from the mirror_ubuntu-artful-kernel tree, git.proxmox.com mirror;
 * includes commit "net: Allow userns root to control ipv6")
 */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65
/* Allocate and populate a blank initial ruleset for table registration.
 * xt_alloc_initial_table() is a token-pasting macro: the ip6t/IP6T
 * arguments select the ip6t_replace layout from xt_repldata.h, sized
 * from info->valid_hooks.  Caller owns (and must vfree) the result. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71
72 /*
73 We keep a set of rules for each CPU, so we can avoid write-locking
74 them in the softirq when updating the counters and therefore
75 only need to read-lock in the softirq; doing a write_lock_bh() in user
76 context stops packets coming through and allows user context to read
77 the counters or update the rules.
78
79 Hence the start of any table is given by get_table() below. */
80
/* Returns whether the packet matches the ip6t_ip6 portion of a rule
 * (addresses, in/out interfaces, protocol).  On a protocol match,
 * *protoff is left at the transport header and *fragoff at the
 * fragment offset for the per-match callbacks that run next.
 * Performance critical - called for every packet. */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* Apply the rule's invert flag to a raw comparison result. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address under the rule's masks (inverted
	 * per IP6T_INV_SRCIP/IP6T_INV_DSTIP). */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface, honouring the per-byte wildcard mask. */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface, same scheme. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain to the upper-layer
		 * protocol; negative return means the chain is broken. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Broken header chain on a non-fragment (offset 0):
			 * hot-drop rather than silently not matching. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
163
164 /* should be ip6 safe */
165 static bool
166 ip6_checkentry(const struct ip6t_ip6 *ipv6)
167 {
168 if (ipv6->flags & ~IP6T_F_MASK) {
169 duprintf("Unknown flag bits set: %08X\n",
170 ipv6->flags & ~IP6T_F_MASK);
171 return false;
172 }
173 if (ipv6->invflags & ~IP6T_INV_MASK) {
174 duprintf("Unknown invflag bits set: %08X\n",
175 ipv6->invflags & ~IP6T_INV_MASK);
176 return false;
177 }
178 return true;
179 }
180
181 static unsigned int
182 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
183 {
184 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
185
186 return NF_DROP;
187 }
188
/* Resolve a byte offset inside a rule blob to the entry that lives there. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((const char *)base + offset);
}
194
195 /* All zeroes == unconditional rule. */
196 /* Mildly perf critical (only if packet tracing is on) */
197 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
198 {
199 static const struct ip6t_ip6 uncond;
200
201 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
202 }
203
/* const-correct wrapper: ip6t_get_target() takes a non-const entry, so
 * cast the qualifier away once here instead of at every read-only
 * call site. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
209
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Human-readable hook names, indexed by NF_INET_* hook number, for
 * TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule produced a trace line: a plain rule, an implicit
 * RETURN at the end of a user chain, or a base-chain policy. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Log parameters for TRACE output: syslog level 4, all log flags set. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
241
/* Mildly perf critical (only if packet tracing is on) */
/* Walk helper for trace_packet(): advances the chain-name/rule-number
 * state for entry 's'; returns 1 once 's' is the traced entry 'e' so
 * the caller can stop iterating. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy);
			 * a base chain's tail is its policy, a user
			 * chain's tail is an implicit return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
273
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a packet
 * that reached entry 'e' with skb->nf_trace set.  Runs with softirqs
 * disabled (called from ip6t_do_table), hence smp_processor_id(). */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Scan from the hook's first rule until we find 'e', tracking
	 * which chain we are in and the rule number within it. */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
304
305 static inline __pure struct ip6t_entry *
306 ip6t_next_entry(const struct ip6t_entry *entry)
307 {
308 return (void *)entry + entry->next_offset;
309 }
310
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-evaluation loop: walk this CPU's copy of the ruleset for
 * the given hook, running matches and targets until a verdict is hit.
 * Runs with softirqs disabled; counter updates are protected by the
 * xt_write_recseq sequence counter so get_counters() can snapshot. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	/* Per-CPU stack of return addresses for jumps into user chains. */
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extension matches must agree before the rule fires. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? (NULL target function == builtin) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict, encoded as -v-1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the jump stack, or fall to
				 * the hook's underflow (policy) rule when
				 * already at the bottom. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* A jump (not a goto, not a no-op fallthrough)
			 * pushes the current entry so RETURN can come back. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
444
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.
   Scratch state during the walk: e->counters.pcnt holds the back
   pointer (restored to 0 on exit) and bit NF_INET_NUMHOOKS of
   e->comefrom marks "currently on the walk path" for loop detection.
   NOTE(review): a jump target (newpos) is only bounds-checked against
   newinfo->size here, not verified to land exactly on an entry
   boundary - later upstream hardening (post CVE-2016-3134) validates
   jump offsets more strictly; confirm against a current tree. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Still on the current walk path: a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
556
557 static void cleanup_match(struct xt_entry_match *m, struct net *net)
558 {
559 struct xt_mtdtor_param par;
560
561 par.net = net;
562 par.match = m->u.kernel.match;
563 par.matchinfo = m->data;
564 par.family = NFPROTO_IPV6;
565 if (par.match->destroy != NULL)
566 par.match->destroy(&par);
567 module_put(par.match->me);
568 }
569
570 static int
571 check_entry(const struct ip6t_entry *e, const char *name)
572 {
573 const struct xt_entry_target *t;
574
575 if (!ip6_checkentry(&e->ipv6)) {
576 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
577 return -EINVAL;
578 }
579
580 if (e->target_offset + sizeof(struct xt_entry_target) >
581 e->next_offset)
582 return -EINVAL;
583
584 t = ip6t_get_target_c(e);
585 if (e->target_offset + t->u.target_size > e->next_offset)
586 return -EINVAL;
587
588 return 0;
589 }
590
591 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
592 {
593 const struct ip6t_ip6 *ipv6 = par->entryinfo;
594 int ret;
595
596 par->match = m->u.kernel.match;
597 par->matchinfo = m->data;
598
599 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
600 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
601 if (ret < 0) {
602 duprintf("ip_tables: check failed for `%s'.\n",
603 par.match->name);
604 return ret;
605 }
606 return 0;
607 }
608
609 static int
610 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
611 {
612 struct xt_match *match;
613 int ret;
614
615 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
616 m->u.user.revision);
617 if (IS_ERR(match)) {
618 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
619 return PTR_ERR(match);
620 }
621 m->u.kernel.match = match;
622
623 ret = check_match(m, par);
624 if (ret)
625 goto err;
626
627 return 0;
628 err:
629 module_put(m->u.kernel.match->me);
630 return ret;
631 }
632
633 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
634 {
635 struct xt_entry_target *t = ip6t_get_target(e);
636 struct xt_tgchk_param par = {
637 .net = net,
638 .table = name,
639 .entryinfo = e,
640 .target = t->u.kernel.target,
641 .targinfo = t->data,
642 .hook_mask = e->comefrom,
643 .family = NFPROTO_IPV6,
644 };
645 int ret;
646
647 t = ip6t_get_target(e);
648 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
649 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
650 if (ret < 0) {
651 duprintf("ip_tables: check failed for `%s'.\n",
652 t->u.kernel.target->name);
653 return ret;
654 }
655 return 0;
656 }
657
/* Fully validate one entry: structural checks, then resolve+check every
 * match and finally the target.  'j' counts matches that were
 * successfully set up so that on failure only those are torn down
 * (xt_ematch_foreach is re-walked and stopped after j iterations). */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
710
711 static bool check_underflow(const struct ip6t_entry *e)
712 {
713 const struct xt_entry_target *t;
714 unsigned int verdict;
715
716 if (!unconditional(&e->ipv6))
717 return false;
718 t = ip6t_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
720 return false;
721 verdict = ((struct xt_standard_target *)t)->verdict;
722 verdict = -verdict - 1;
723 return verdict == NF_DROP || verdict == NF_ACCEPT;
724 }
725
/* First-pass per-entry check during translate_table(): alignment and
 * minimum-size bounds, plus recording of hook entry points and
 * underflow positions when this entry sits at one.
 * NOTE(review): next_offset is only checked for a minimum here, not
 * against 'limit'; later upstream hardening adds that upper-bound
 * check - confirm against a current tree. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
772
773 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
774 {
775 struct xt_tgdtor_param par;
776 struct xt_entry_target *t;
777 struct xt_entry_match *ematch;
778
779 /* Cleanup all matches */
780 xt_ematch_foreach(ematch, e)
781 cleanup_match(ematch, net);
782 t = ip6t_get_target(e);
783
784 par.net = net;
785 par.target = t->u.kernel.target;
786 par.targinfo = t->data;
787 par.family = NFPROTO_IPV6;
788 if (par.target->destroy != NULL)
789 par.target->destroy(&par);
790 module_put(par.target->me);
791 }
792
/* Checks and translates the user-supplied table segment (held in
   newinfo).  Phases: structural pass over every entry (sizes, hook
   entry points), completeness of hook/underflow assignment, loop
   detection via mark_source_chains(), full match/target validation
   with rollback of already-checked entries on failure, and finally
   replication of the blob to every other CPU's copy. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) needs a jumpstack slot. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back: tear down only the i entries that passed. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
881
/* Sum every CPU's per-rule packet/byte counters into counters[].
 * Each per-rule read is done under the CPU's xt_recseq sequence
 * counter and retried if a writer (ip6t_do_table) raced with it, so
 * each bcnt/pcnt pair is a consistent snapshot. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
909
910 static struct xt_counters *alloc_counters(const struct xt_table *table)
911 {
912 unsigned int countersize;
913 struct xt_counters *counters;
914 const struct xt_table_info *private = table->private;
915
916 /* We need atomic snapshot of counters: rest doesn't change
917 (other than comefrom, which userspace doesn't care
918 about). */
919 countersize = sizeof(struct xt_counters) * private->number;
920 counters = vzalloc(countersize);
921
922 if (counters == NULL)
923 return ERR_PTR(-ENOMEM);
924
925 get_counters(private, counters);
926
927 return counters;
928 }
929
/* Copy the whole ruleset blob to userspace, then patch it up in place:
 * the counters are replaced with the atomic snapshot (per-CPU copies
 * hold only partial counts) and each match/target kernel pointer union
 * is overwritten with the user-visible name. */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the (partial) copied counters with the summed
		 * snapshot for this rule. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1003
1004 #ifdef CONFIG_COMPAT
1005 static void compat_standard_from_user(void *dst, const void *src)
1006 {
1007 int v = *(compat_int_t *)src;
1008
1009 if (v > 0)
1010 v += xt_compat_calc_jump(AF_INET6, v);
1011 memcpy(dst, &v, sizeof(v));
1012 }
1013
1014 static int compat_standard_to_user(void __user *dst, const void *src)
1015 {
1016 compat_int_t cv = *(int *)src;
1017
1018 if (cv > 0)
1019 cv -= xt_compat_calc_jump(AF_INET6, cv);
1020 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1021 }
1022
/* Compute, for one native entry, how much smaller its compat (32-bit)
 * representation is ('off' = native minus compat sizes of the entry
 * header plus all matches and the target), record that delta for the
 * entry's offset in the compat offset table, shrink newinfo->size
 * accordingly, and shift any hook/underflow offsets that lie after
 * this entry. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1053
/* Build a compat view of a table's metadata: copy everything up to
 * (but excluding) the per-CPU entries array, then walk one CPU's blob
 * to shrink size/offsets to their 32-bit equivalents via
 * compat_calc_entry().  Caller holds the compat lock. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1076 #endif
1077
/* IP6T_SO_GET_INFO handler: look up the named table (loading its module
 * on demand) and return its hook entry points, underflows, entry count
 * and size.  In compat mode the sizes/offsets are first recomputed for
 * the 32-bit layout under the compat lock. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Ensure NUL termination of the user-supplied table name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1138
/* IP6T_SO_GET_ENTRIES handler: copy the named table's ruleset back to
 * userspace.  The caller must pass the exact current size (as reported
 * by get_info); a mismatch - e.g. the table changed in between - gets
 * -EAGAIN so userspace can retry. */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1178
/*
 * Common backend for do_replace() and compat_do_replace(): atomically swap
 * the table's rule blob for @newinfo, snapshot the old counters into the
 * user buffer @counters_ptr, and tear down the old entries.
 *
 * On success the caller must NOT free @newinfo (ownership passed to the
 * table); on failure the caller still owns it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	/* Buffer for the counter snapshot returned to userspace. */
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	/* Auto-load "ip6table_<name>" if the table isn't registered yet. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Swap in the new blob; oldinfo is the previous one (or NULL on
	 * failure, with the error already stored in ret).
	 */
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules.  The two
	 * conditions together drop 1 or 2 references so the table module's
	 * refcount tracks whether any non-builtin rules remain.
	 */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* The table is already replaced at this point, so a failed copy
	 * only reports -EFAULT; it does not roll the replacement back.
	 */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1251
1252 static int
1253 do_replace(struct net *net, const void __user *user, unsigned int len)
1254 {
1255 int ret;
1256 struct ip6t_replace tmp;
1257 struct xt_table_info *newinfo;
1258 void *loc_cpu_entry;
1259 struct ip6t_entry *iter;
1260
1261 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1262 return -EFAULT;
1263
1264 /* overflow check */
1265 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1266 return -ENOMEM;
1267 tmp.name[sizeof(tmp.name)-1] = 0;
1268
1269 newinfo = xt_alloc_table_info(tmp.size);
1270 if (!newinfo)
1271 return -ENOMEM;
1272
1273 /* choose the copy that is on our node/cpu */
1274 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1275 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1276 tmp.size) != 0) {
1277 ret = -EFAULT;
1278 goto free_newinfo;
1279 }
1280
1281 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1282 if (ret != 0)
1283 goto free_newinfo;
1284
1285 duprintf("ip_tables: Translated table\n");
1286
1287 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1288 tmp.num_counters, tmp.counters);
1289 if (ret)
1290 goto free_newinfo_untrans;
1291 return 0;
1292
1293 free_newinfo_untrans:
1294 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1295 cleanup_entry(iter, net);
1296 free_newinfo:
1297 xt_free_table_info(newinfo);
1298 return ret;
1299 }
1300
1301 static int
1302 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1303 int compat)
1304 {
1305 unsigned int i, curcpu;
1306 struct xt_counters_info tmp;
1307 struct xt_counters *paddc;
1308 unsigned int num_counters;
1309 char *name;
1310 int size;
1311 void *ptmp;
1312 struct xt_table *t;
1313 const struct xt_table_info *private;
1314 int ret = 0;
1315 const void *loc_cpu_entry;
1316 struct ip6t_entry *iter;
1317 unsigned int addend;
1318 #ifdef CONFIG_COMPAT
1319 struct compat_xt_counters_info compat_tmp;
1320
1321 if (compat) {
1322 ptmp = &compat_tmp;
1323 size = sizeof(struct compat_xt_counters_info);
1324 } else
1325 #endif
1326 {
1327 ptmp = &tmp;
1328 size = sizeof(struct xt_counters_info);
1329 }
1330
1331 if (copy_from_user(ptmp, user, size) != 0)
1332 return -EFAULT;
1333
1334 #ifdef CONFIG_COMPAT
1335 if (compat) {
1336 num_counters = compat_tmp.num_counters;
1337 name = compat_tmp.name;
1338 } else
1339 #endif
1340 {
1341 num_counters = tmp.num_counters;
1342 name = tmp.name;
1343 }
1344
1345 if (len != size + num_counters * sizeof(struct xt_counters))
1346 return -EINVAL;
1347
1348 paddc = vmalloc(len - size);
1349 if (!paddc)
1350 return -ENOMEM;
1351
1352 if (copy_from_user(paddc, user + size, len - size) != 0) {
1353 ret = -EFAULT;
1354 goto free;
1355 }
1356
1357 t = xt_find_table_lock(net, AF_INET6, name);
1358 if (!t || IS_ERR(t)) {
1359 ret = t ? PTR_ERR(t) : -ENOENT;
1360 goto free;
1361 }
1362
1363
1364 local_bh_disable();
1365 private = t->private;
1366 if (private->number != num_counters) {
1367 ret = -EINVAL;
1368 goto unlock_up_free;
1369 }
1370
1371 i = 0;
1372 /* Choose the copy that is on our node */
1373 curcpu = smp_processor_id();
1374 addend = xt_write_recseq_begin();
1375 loc_cpu_entry = private->entries[curcpu];
1376 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1377 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1378 ++i;
1379 }
1380 xt_write_recseq_end(addend);
1381
1382 unlock_up_free:
1383 local_bh_enable();
1384 xt_table_unlock(t);
1385 module_put(t->me);
1386 free:
1387 vfree(paddc);
1388
1389 return ret;
1390 }
1391
1392 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace (IP6T_SO_SET_REPLACE).
 * Field-for-field mirror of the native header, with the counters pointer
 * shrunk to a compat_uptr_t; the variable-length entry blob follows.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;		/* size of entries[] in bytes */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1404
/*
 * Convert one kernel entry into its 32-bit compat layout at *dstptr.
 * The entry header and the counter snapshot for rule @i are copied first;
 * matches and the target are converted individually, each shrinking *size.
 * Finally the entry's internal offsets are rewritten to account for the
 * total shrinkage so far (origsize - *size).
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Fixed header plus this rule's counters. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* Offsets shrink by however many bytes the conversion saved. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the already-copied header with the corrected offsets. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1443
1444 static int
1445 compat_find_calc_match(struct xt_entry_match *m,
1446 const char *name,
1447 const struct ip6t_ip6 *ipv6,
1448 unsigned int hookmask,
1449 int *size)
1450 {
1451 struct xt_match *match;
1452
1453 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1454 m->u.user.revision);
1455 if (IS_ERR(match)) {
1456 duprintf("compat_check_calc_match: `%s' not found\n",
1457 m->u.user.name);
1458 return PTR_ERR(match);
1459 }
1460 m->u.kernel.match = match;
1461 *size += xt_compat_match_offset(match);
1462 return 0;
1463 }
1464
1465 static void compat_release_entry(struct compat_ip6t_entry *e)
1466 {
1467 struct xt_entry_target *t;
1468 struct xt_entry_match *ematch;
1469
1470 /* Cleanup all matches */
1471 xt_ematch_foreach(ematch, e)
1472 module_put(ematch->u.kernel.match->me);
1473 t = compat_ip6t_get_target(e);
1474 module_put(t->u.kernel.target->me);
1475 }
1476
/*
 * First-pass validation of one compat entry: bounds/alignment checks,
 * resolution of all match and target modules (taking module references),
 * registration of this entry's native/compat size delta, and recording of
 * hook entry/underflow positions that land exactly on this entry.
 *
 * On error, all references taken so far are dropped; @j counts how many
 * matches were successfully resolved and therefore need releasing.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be aligned and leave room for at least a header
	 * before the end of the blob.
	 */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* next_offset must cover at least the entry plus a target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much larger the native layout will be. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember this entry's size delta for the second (copy) pass. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* Drop refs only for the first j matches that were resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1564
/*
 * Second pass of compat translation: expand one validated compat entry
 * into its native layout at *dstptr, rewriting the entry's offsets and
 * shifting any hook entry/underflow positions that lie past this entry
 * by the amount the entry grew (*size increases; origsize - *size is
 * negative growth applied to the offsets).
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Hooks/underflows located after this entry move by the growth. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1603
/*
 * Run the ->checkentry hooks for every match and the target of an entry
 * that was expanded from compat layout.  On failure, cleanup_match() is
 * invoked only for the first @j matches whose check succeeded.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* Undo only the matches that passed check_match(). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1638
/*
 * Translate a full compat-layout ruleset into native layout.
 *
 * Pass 1 (under xt_compat_lock): validate every compat entry, resolve its
 * modules and record per-entry size deltas.  Pass 2: allocate the native
 * blob and expand each entry into it.  Afterwards the chain structure is
 * verified and every entry's ->check hooks are run.
 *
 * On success *pinfo/*pentry0 are replaced with the native table (the old
 * info is freed); on failure all module references are released and the
 * inputs are left for the caller to free.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;	/* j = entries whose module refs must be released */
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the accumulated native-layout growth. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* Pass 2: expand each entry into the native blob. */
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* Run the ->check hooks; i counts entries needing full cleanup. */
	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	/* Release the refs taken for the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1796
1797 static int
1798 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1799 {
1800 int ret;
1801 struct compat_ip6t_replace tmp;
1802 struct xt_table_info *newinfo;
1803 void *loc_cpu_entry;
1804 struct ip6t_entry *iter;
1805
1806 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1807 return -EFAULT;
1808
1809 /* overflow check */
1810 if (tmp.size >= INT_MAX / num_possible_cpus())
1811 return -ENOMEM;
1812 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1813 return -ENOMEM;
1814 tmp.name[sizeof(tmp.name)-1] = 0;
1815
1816 newinfo = xt_alloc_table_info(tmp.size);
1817 if (!newinfo)
1818 return -ENOMEM;
1819
1820 /* choose the copy that is on our node/cpu */
1821 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1822 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1823 tmp.size) != 0) {
1824 ret = -EFAULT;
1825 goto free_newinfo;
1826 }
1827
1828 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1829 &newinfo, &loc_cpu_entry, tmp.size,
1830 tmp.num_entries, tmp.hook_entry,
1831 tmp.underflow);
1832 if (ret != 0)
1833 goto free_newinfo;
1834
1835 duprintf("compat_do_replace: Translated table\n");
1836
1837 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1838 tmp.num_counters, compat_ptr(tmp.counters));
1839 if (ret)
1840 goto free_newinfo_untrans;
1841 return 0;
1842
1843 free_newinfo_untrans:
1844 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1845 cleanup_entry(iter, net);
1846 free_newinfo:
1847 xt_free_table_info(newinfo);
1848 return ret;
1849 }
1850
1851 static int
1852 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1853 unsigned int len)
1854 {
1855 int ret;
1856
1857 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1858 return -EPERM;
1859
1860 switch (cmd) {
1861 case IP6T_SO_SET_REPLACE:
1862 ret = compat_do_replace(sock_net(sk), user, len);
1863 break;
1864
1865 case IP6T_SO_SET_ADD_COUNTERS:
1866 ret = do_add_counters(sock_net(sk), user, len, 1);
1867 break;
1868
1869 default:
1870 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1871 ret = -EINVAL;
1872 }
1873
1874 return ret;
1875 }
1876
/* 32-bit userspace layout for IP6T_SO_GET_ENTRIES: table name and the
 * expected blob size, followed by the compat-layout entries themselves.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1882
/*
 * Copy the whole ruleset of @table to @userptr in compat layout, one
 * entry at a time, pairing each rule with its aggregated counters.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	/* Aggregated per-rule counters across all CPUs. */
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1917
1918 static int
1919 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1920 int *len)
1921 {
1922 int ret;
1923 struct compat_ip6t_get_entries get;
1924 struct xt_table *t;
1925
1926 if (*len < sizeof(get)) {
1927 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1928 return -EINVAL;
1929 }
1930
1931 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1932 return -EFAULT;
1933
1934 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1935 duprintf("compat_get_entries: %u != %zu\n",
1936 *len, sizeof(get) + get.size);
1937 return -EINVAL;
1938 }
1939
1940 xt_compat_lock(AF_INET6);
1941 t = xt_find_table_lock(net, AF_INET6, get.name);
1942 if (t && !IS_ERR(t)) {
1943 const struct xt_table_info *private = t->private;
1944 struct xt_table_info info;
1945 duprintf("t->private->number = %u\n", private->number);
1946 ret = compat_table_info(private, &info);
1947 if (!ret && get.size == info.size) {
1948 ret = compat_copy_entries_to_user(private->size,
1949 t, uptr->entrytable);
1950 } else if (!ret) {
1951 duprintf("compat_get_entries: I've got %u not %u!\n",
1952 private->size, get.size);
1953 ret = -EAGAIN;
1954 }
1955 xt_compat_flush_offsets(AF_INET6);
1956 module_put(t->me);
1957 xt_table_unlock(t);
1958 } else
1959 ret = t ? PTR_ERR(t) : -ENOENT;
1960
1961 xt_compat_unlock(AF_INET6);
1962 return ret;
1963 }
1964
1965 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1966
1967 static int
1968 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1969 {
1970 int ret;
1971
1972 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1973 return -EPERM;
1974
1975 switch (cmd) {
1976 case IP6T_SO_GET_INFO:
1977 ret = get_info(sock_net(sk), user, len, 1);
1978 break;
1979 case IP6T_SO_GET_ENTRIES:
1980 ret = compat_get_entries(sock_net(sk), user, len);
1981 break;
1982 default:
1983 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1984 }
1985 return ret;
1986 }
1987 #endif
1988
1989 static int
1990 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1991 {
1992 int ret;
1993
1994 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1995 return -EPERM;
1996
1997 switch (cmd) {
1998 case IP6T_SO_SET_REPLACE:
1999 ret = do_replace(sock_net(sk), user, len);
2000 break;
2001
2002 case IP6T_SO_SET_ADD_COUNTERS:
2003 ret = do_add_counters(sock_net(sk), user, len, 0);
2004 break;
2005
2006 default:
2007 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2008 ret = -EINVAL;
2009 }
2010
2011 return ret;
2012 }
2013
/*
 * Native getsockopt entry point: table info, entry dump, and match/target
 * revision queries.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Terminate the user-supplied extension name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Auto-load "ip6t_<name>" if the revision lookup fails;
		 * ret is filled in by xt_find_revision().
		 */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2065
/*
 * Register a new ip6tables table for @net, seeded with the initial
 * ruleset in @repl.  Returns the registered xt_table or an ERR_PTR.
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Placeholder info that xt_register_table() swaps for newinfo. */
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate entries and take match/target module references. */
	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

 out_free:
	xt_free_table_info(newinfo);
 out:
	return ERR_PTR(ret);
}
2102
/*
 * Unregister @table from @net and release its rules, their module
 * references, and the table blob itself.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	/* table may be freed by xt_unregister_table(); keep the owner. */
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* Drop the extra ref held while user-added rules existed. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2120
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit;

	/* Type must match exactly; code must fall in [min_code, max_code]. */
	hit = type == test_type && code >= min_code && code <= max_code;

	/* XOR flips the result when the rule is negated with '!'. */
	return hit ^ invert;
}
2130
/* Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's configured range.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2158
2159 /* Called when user tries to insert an entry of this type. */
2160 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2161 {
2162 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2163
2164 /* Must specify no unknown invflags */
2165 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2166 }
2167
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* No .target hook: the standard verdict is presumably
		 * handled inline by the table traversal code — that code
		 * is not visible in this file chunk; confirm there.
		 */
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),	/* the verdict value */
		.family = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		/* Placeholder target used for user-defined chain names. */
		.name = XT_ERROR_TARGET,
		.target = ip6t_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_IPV6,
	},
};
2187
/* get/setsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the handlers above.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2204
/* The built-in "icmp6" match, always available without a separate module. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2215
/* Per-netns init: set up the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2220
/* Per-netns teardown: release the IPv6 xtables state for @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2225
/* Hook the per-netns init/exit pair into the pernet framework. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2230
/* Module init: register pernet ops, built-in targets/matches, and the
 * sockopt interface; unwind in reverse order on any failure.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

	/* Error unwinding, in reverse registration order. */
err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2264
/* Module exit: unregister everything in reverse order of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2273
2274 /*
2275 * find the offset to specified header or the protocol number of last header
2276 * if target < 0. "last header" is transport protocol header, ESP, or
2277 * "No next header".
2278 *
2279 * Note that *offset is used as input/output parameter. an if it is not zero,
2280 * then it must be a valid offset to an inner IPv6 header. This can be used
2281 * to explore inner IPv6 header, eg. ICMPv6 error messages.
2282 *
2283 * If target header is found, its offset is set in *offset and return protocol
2284 * number. Otherwise, return -1.
2285 *
2286 * If the first fragment doesn't contain the final protocol header or
2287 * NEXTHDR_NONE it is considered invalid.
2288 *
2289 * Note that non-1st fragment is special case that "the protocol number
2290 * of last header" is "next header" field in Fragment header. In this case,
2291 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2292 * isn't NULL.
2293 *
2294 * if flags is not NULL and it's a fragment, then the frag flag IP6T_FH_F_FRAG
2295 * will be set. If it's an AH header, the IP6T_FH_F_AUTH flag is set and
2296 * target < 0, then this function will stop at the AH header.
2297 */
2298 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2299 int target, unsigned short *fragoff, int *flags)
2300 {
2301 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2302 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2303 unsigned int len;
2304
2305 if (fragoff)
2306 *fragoff = 0;
2307
2308 if (*offset) {
2309 struct ipv6hdr _ip6, *ip6;
2310
2311 ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
2312 if (!ip6 || (ip6->version != 6)) {
2313 printk(KERN_ERR "IPv6 header not found\n");
2314 return -EBADMSG;
2315 }
2316 start = *offset + sizeof(struct ipv6hdr);
2317 nexthdr = ip6->nexthdr;
2318 }
2319 len = skb->len - start;
2320
2321 while (nexthdr != target) {
2322 struct ipv6_opt_hdr _hdr, *hp;
2323 unsigned int hdrlen;
2324
2325 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2326 if (target < 0)
2327 break;
2328 return -ENOENT;
2329 }
2330
2331 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2332 if (hp == NULL)
2333 return -EBADMSG;
2334 if (nexthdr == NEXTHDR_FRAGMENT) {
2335 unsigned short _frag_off;
2336 __be16 *fp;
2337
2338 if (flags) /* Indicate that this is a fragment */
2339 *flags |= IP6T_FH_F_FRAG;
2340 fp = skb_header_pointer(skb,
2341 start+offsetof(struct frag_hdr,
2342 frag_off),
2343 sizeof(_frag_off),
2344 &_frag_off);
2345 if (fp == NULL)
2346 return -EBADMSG;
2347
2348 _frag_off = ntohs(*fp) & ~0x7;
2349 if (_frag_off) {
2350 if (target < 0 &&
2351 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2352 hp->nexthdr == NEXTHDR_NONE)) {
2353 if (fragoff)
2354 *fragoff = _frag_off;
2355 return hp->nexthdr;
2356 }
2357 return -ENOENT;
2358 }
2359 hdrlen = 8;
2360 } else if (nexthdr == NEXTHDR_AUTH) {
2361 if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
2362 break;
2363 hdrlen = (hp->hdrlen + 2) << 2;
2364 } else
2365 hdrlen = ipv6_optlen(hp);
2366
2367 nexthdr = hp->nexthdr;
2368 len -= hdrlen;
2369 start += hdrlen;
2370 }
2371
2372 *offset = start;
2373 return nexthdr;
2374 }
2375
/* Exported for other kernel modules — presumably the ip6table_* table
 * modules and IPv6 netfilter match/target extensions; verify against
 * callers outside this file. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);