1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
45
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
48 #else
49 #define dprintf(format, args...)
50 #endif
51
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
54 #else
55 #define duprintf(format, args...)
56 #endif
57
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
71 {
72 return xt_alloc_initial_table(ip6t, IP6T);
73 }
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75
76 /*
77 The table entries are shared by all CPUs; only the packet and byte
78 counters are kept per CPU (see xt_get_this_cpu_counter()). The packet
79 path bumps those counters inside an xt_write_recseq_begin()/
80 xt_write_recseq_end() section, so user context can take a consistent
81 snapshot with get_counters() without stopping traffic, and a table
82 replacement swaps in a whole new xt_table_info via xt_replace_table().
83 */
84
85 /* Returns whether the packet matches the rule or not. */
86 /* Performance critical - called for every packet */
87 static inline bool
88 ip6_packet_match(const struct sk_buff *skb,
89 const char *indev,
90 const char *outdev,
91 const struct ip6t_ip6 *ip6info,
92 unsigned int *protoff,
93 int *fragoff, bool *hotdrop)
94 {
95 unsigned long ret;
96 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
97
98 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
99
100 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
101 &ip6info->src), IP6T_INV_SRCIP) ||
102 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
103 &ip6info->dst), IP6T_INV_DSTIP)) {
104 dprintf("Source or dest mismatch.\n");
105 /*
106 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
107 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
108 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
109 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
110 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
111 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 return false;
113 }
114
115 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
116
117 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
118 dprintf("VIA in mismatch (%s vs %s).%s\n",
119 indev, ip6info->iniface,
120 ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
121 return false;
122 }
123
124 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
125
126 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n",
128 outdev, ip6info->outiface,
129 ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
130 return false;
131 }
132
133 /* ... might want to do something with class and flowlabel here ... */
134
135 /* look for the desired protocol header */
136 if (ip6info->flags & IP6T_F_PROTO) {
137 int protohdr;
138 unsigned short _frag_off;
139
140 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
141 if (protohdr < 0) {
142 if (_frag_off == 0)
143 *hotdrop = true;
144 return false;
145 }
146 *fragoff = _frag_off;
147
148 dprintf("Packet protocol %hi ?= %s%hi.\n",
149 protohdr,
150 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
151 ip6info->proto);
152
153 if (ip6info->proto == protohdr) {
154 if (ip6info->invflags & IP6T_INV_PROTO)
155 return false;
156
157 return true;
158 }
159
160 /* We need to match for '-p all' (proto == 0), too! */
161 if ((ip6info->proto != 0) &&
162 !(ip6info->invflags & IP6T_INV_PROTO))
163 return false;
164 }
165 return true;
166 }
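/* Worked example of the FWINV() macro above (the rule is illustrative,
 * not taken from this file): suppose a rule "-s 2001:db8::/32 ! -i eth0"
 * and a packet from 2001:db8::1 arriving on eth0.
 *
 *   ipv6_masked_addr_cmp(saddr, smsk, src) == 0      address matches
 *   FWINV(0, IP6T_INV_SRCIP)  ->  0 ^ 0 == 0         keep going
 *
 *   ifname_compare_aligned("eth0", "eth0", mask) == 0
 *   FWINV(0 != 0, IP6T_INV_VIA_IN)  ->  0 ^ 1 == 1   "! -i eth0" fails,
 *                                                    the rule does not match
 *
 * i.e. FWINV(cond, flag) is simply cond XOR (flag set in ->invflags).
 */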
167
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
175 return false;
176 }
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
180 return false;
181 }
182 return true;
183 }
184
185 static unsigned int
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
187 {
188 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
189
190 return NF_DROP;
191 }
192
193 static inline struct ip6t_entry *
194 get_entry(const void *base, unsigned int offset)
195 {
196 return (struct ip6t_entry *)(base + offset);
197 }
198
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_entry *e)
202 {
203 static const struct ip6t_ip6 uncond;
204
205 return e->target_offset == sizeof(struct ip6t_entry) &&
206 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
207 }
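/* Example: the policy rule that ip6tables appends at the end of a
 * built-in chain has no address/interface/protocol selectors and no
 * matches, so its target_offset equals sizeof(struct ip6t_entry) and
 * its ->ipv6 is all zeroes: unconditional() returns true.  Adding any
 * selector, even an inverted one such as "! -p tcp", makes the rule
 * conditional again; check_underflow() below relies on this to reject
 * underflows that are not plain unconditional ACCEPT/DROP standard
 * targets.
 */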
208
209 static inline const struct xt_entry_target *
210 ip6t_get_target_c(const struct ip6t_entry *e)
211 {
212 return ip6t_get_target((struct ip6t_entry *)e);
213 }
214
215 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
216 /* This cries for unification! */
217 static const char *const hooknames[] = {
218 [NF_INET_PRE_ROUTING] = "PREROUTING",
219 [NF_INET_LOCAL_IN] = "INPUT",
220 [NF_INET_FORWARD] = "FORWARD",
221 [NF_INET_LOCAL_OUT] = "OUTPUT",
222 [NF_INET_POST_ROUTING] = "POSTROUTING",
223 };
224
225 enum nf_ip_trace_comments {
226 NF_IP6_TRACE_COMMENT_RULE,
227 NF_IP6_TRACE_COMMENT_RETURN,
228 NF_IP6_TRACE_COMMENT_POLICY,
229 };
230
231 static const char *const comments[] = {
232 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
233 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
234 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
235 };
236
237 static struct nf_loginfo trace_loginfo = {
238 .type = NF_LOG_TYPE_LOG,
239 .u = {
240 .log = {
241 .level = LOGLEVEL_WARNING,
242 .logflags = NF_LOG_MASK,
243 },
244 },
245 };
246
247 /* Mildly perf critical (only if packet tracing is on) */
248 static inline int
249 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
250 const char *hookname, const char **chainname,
251 const char **comment, unsigned int *rulenum)
252 {
253 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
254
255 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
256 /* Head of user chain: ERROR target with chainname */
257 *chainname = t->target.data;
258 (*rulenum) = 0;
259 } else if (s == e) {
260 (*rulenum)++;
261
262 if (unconditional(s) &&
263 strcmp(t->target.u.kernel.target->name,
264 XT_STANDARD_TARGET) == 0 &&
265 t->verdict < 0) {
266 /* Tail of chains: STANDARD target (return/policy) */
267 *comment = *chainname == hookname
268 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
269 : comments[NF_IP6_TRACE_COMMENT_RETURN];
270 }
271 return 1;
272 } else
273 (*rulenum)++;
274
275 return 0;
276 }
277
278 static void trace_packet(struct net *net,
279 const struct sk_buff *skb,
280 unsigned int hook,
281 const struct net_device *in,
282 const struct net_device *out,
283 const char *tablename,
284 const struct xt_table_info *private,
285 const struct ip6t_entry *e)
286 {
287 const struct ip6t_entry *root;
288 const char *hookname, *chainname, *comment;
289 const struct ip6t_entry *iter;
290 unsigned int rulenum = 0;
291
292 root = get_entry(private->entries, private->hook_entry[hook]);
293
294 hookname = chainname = hooknames[hook];
295 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
296
297 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
298 if (get_chainname_rulenum(iter, e, hookname,
299 &chainname, &comment, &rulenum) != 0)
300 break;
301
302 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
303 "TRACE: %s:%s:%s:%u ",
304 tablename, chainname, comment, rulenum);
305 }
306 #endif
307
308 static inline struct ip6t_entry *
309 ip6t_next_entry(const struct ip6t_entry *entry)
310 {
311 return (void *)entry + entry->next_offset;
312 }
313
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 unsigned int
316 ip6t_do_table(struct sk_buff *skb,
317 const struct nf_hook_state *state,
318 struct xt_table *table)
319 {
320 unsigned int hook = state->hook;
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int stackidx, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
330 unsigned int addend;
331
332 /* Initialization */
333 stackidx = 0;
334 indev = state->in ? state->in->name : nulldevname;
335 outdev = state->out ? state->out->name : nulldevname;
336 /* We handle fragments by dealing with the first fragment as
337 * if it was a normal packet. All other fragments are treated
338 * normally, except that they will NEVER match rules that ask
339 * things we don't know (ie. tcp syn flag or ports). If the
340 * rule is also a fragment-specific rule, non-fragments won't
341 * match it. */
342 acpar.hotdrop = false;
343 acpar.net = state->net;
344 acpar.in = state->in;
345 acpar.out = state->out;
346 acpar.family = NFPROTO_IPV6;
347 acpar.hooknum = hook;
348
349 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
350
351 local_bh_disable();
352 addend = xt_write_recseq_begin();
353 private = table->private;
354 /*
355 * Ensure we load private-> members after we've fetched the base
356 * pointer.
357 */
358 smp_read_barrier_depends();
359 cpu = smp_processor_id();
360 table_base = private->entries;
361 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
362
363 /* Switch to alternate jumpstack if we're being invoked via TEE.
364 * TEE issues XT_CONTINUE verdict on original skb so we must not
365 * clobber the jumpstack.
366 *
367 * For recursion via REJECT or SYNPROXY the stack will be clobbered
368 * but it is no problem since absolute verdict is issued by these.
369 */
370 if (static_key_false(&xt_tee_enabled))
371 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
372
373 e = get_entry(table_base, private->hook_entry[hook]);
374
375 do {
376 const struct xt_entry_target *t;
377 const struct xt_entry_match *ematch;
378 struct xt_counters *counter;
379
380 IP_NF_ASSERT(e);
381 acpar.thoff = 0;
382 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
383 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
384 no_match:
385 e = ip6t_next_entry(e);
386 continue;
387 }
388
389 xt_ematch_foreach(ematch, e) {
390 acpar.match = ematch->u.kernel.match;
391 acpar.matchinfo = ematch->data;
392 if (!acpar.match->match(skb, &acpar))
393 goto no_match;
394 }
395
396 counter = xt_get_this_cpu_counter(&e->counters);
397 ADD_COUNTER(*counter, skb->len, 1);
398
399 t = ip6t_get_target_c(e);
400 IP_NF_ASSERT(t->u.kernel.target);
401
402 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
403 /* The packet is traced: log it */
404 if (unlikely(skb->nf_trace))
405 trace_packet(state->net, skb, hook, state->in,
406 state->out, table->name, private, e);
407 #endif
408 /* Standard target? */
409 if (!t->u.kernel.target->target) {
410 int v;
411
412 v = ((struct xt_standard_target *)t)->verdict;
413 if (v < 0) {
414 /* Pop from stack? */
415 if (v != XT_RETURN) {
416 verdict = (unsigned int)(-v) - 1;
417 break;
418 }
419 if (stackidx == 0)
420 e = get_entry(table_base,
421 private->underflow[hook]);
422 else
423 e = ip6t_next_entry(jumpstack[--stackidx]);
424 continue;
425 }
426 if (table_base + v != ip6t_next_entry(e) &&
427 !(e->ipv6.flags & IP6T_F_GOTO)) {
428 jumpstack[stackidx++] = e;
429 }
430
431 e = get_entry(table_base, v);
432 continue;
433 }
434
435 acpar.target = t->u.kernel.target;
436 acpar.targinfo = t->data;
437
438 verdict = t->u.kernel.target->target(skb, &acpar);
439 if (verdict == XT_CONTINUE)
440 e = ip6t_next_entry(e);
441 else
442 /* Verdict */
443 break;
444 } while (!acpar.hotdrop);
445
446 xt_write_recseq_end(addend);
447 local_bh_enable();
448
449 #ifdef DEBUG_ALLOW_ALL
450 return NF_ACCEPT;
451 #else
452 if (acpar.hotdrop)
453 return NF_DROP;
454 else return verdict;
455 #endif
456 }
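/* A worked example of the standard-target verdict encoding handled in
 * the loop above:
 *
 *   v > 0           jump: v is the byte offset of the target rule; the
 *                   current rule is pushed on jumpstack[] unless this is
 *                   a goto (IP6T_F_GOTO) or the target is simply the
 *                   next rule (a plain fallthrough)
 *   v == XT_RETURN  pop the jumpstack and resume after the calling rule,
 *                   or fall back to the hook's underflow rule when the
 *                   stack is empty
 *   other v < 0     final verdict, decoded as (unsigned int)(-v) - 1:
 *                   v == -1  ->  NF_DROP   (0)
 *                   v == -2  ->  NF_ACCEPT (1)
 */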
457
458 static bool next_offset_ok(const struct xt_table_info *t, unsigned int newpos)
459 {
460 if (newpos > t->size - sizeof(struct ip6t_entry))
461 return false;
462
463 if (newpos % __alignof__(struct ip6t_entry) != 0)
464 return false;
465
466 return true;
467 }
468
469 /* Figures out from what hook each rule can be called: returns 0 if
470 there are loops. Puts hook bitmask in comefrom. */
471 static int
472 mark_source_chains(const struct xt_table_info *newinfo,
473 unsigned int valid_hooks, void *entry0)
474 {
475 unsigned int hook;
476
477 /* No recursion; use packet counter to save back ptrs (reset
478 to 0 as we leave), and comefrom to save source hook bitmask */
479 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
480 unsigned int pos = newinfo->hook_entry[hook];
481 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
482
483 if (!(valid_hooks & (1 << hook)))
484 continue;
485
486 /* Set initial back pointer. */
487 e->counters.pcnt = pos;
488
489 for (;;) {
490 const struct xt_standard_target *t
491 = (void *)ip6t_get_target_c(e);
492 int visited = e->comefrom & (1 << hook);
493
494 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
495 pr_err("iptables: loop hook %u pos %u %08X.\n",
496 hook, pos, e->comefrom);
497 return 0;
498 }
499 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
500
501 /* Unconditional return/END. */
502 if ((unconditional(e) &&
503 (strcmp(t->target.u.user.name,
504 XT_STANDARD_TARGET) == 0) &&
505 t->verdict < 0) || visited) {
506 unsigned int oldpos, size;
507
508 if ((strcmp(t->target.u.user.name,
509 XT_STANDARD_TARGET) == 0) &&
510 t->verdict < -NF_MAX_VERDICT - 1) {
511 duprintf("mark_source_chains: bad "
512 "negative verdict (%i)\n",
513 t->verdict);
514 return 0;
515 }
516
517 /* Return: backtrack through the last
518 big jump. */
519 do {
520 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
521 #ifdef DEBUG_IP_FIREWALL_USER
522 if (e->comefrom
523 & (1 << NF_INET_NUMHOOKS)) {
524 duprintf("Back unset "
525 "on hook %u "
526 "rule %u\n",
527 hook, pos);
528 }
529 #endif
530 oldpos = pos;
531 pos = e->counters.pcnt;
532 e->counters.pcnt = 0;
533
534 /* We're at the start. */
535 if (pos == oldpos)
536 goto next;
537
538 e = (struct ip6t_entry *)
539 (entry0 + pos);
540 } while (oldpos == pos + e->next_offset);
541
542 /* Move along one */
543 size = e->next_offset;
544 if (!next_offset_ok(newinfo, pos + size))
545 return 0;
546 e = (struct ip6t_entry *)
547 (entry0 + pos + size);
548 e->counters.pcnt = pos;
549 pos += size;
550 } else {
551 int newpos = t->verdict;
552
553 if (strcmp(t->target.u.user.name,
554 XT_STANDARD_TARGET) == 0 &&
555 newpos >= 0) {
556 /* This a jump; chase it. */
557 duprintf("Jump rule %u -> %u\n",
558 pos, newpos);
559 } else {
560 /* ... this is a fallthru */
561 newpos = pos + e->next_offset;
562 }
563
564 if (!next_offset_ok(newinfo, newpos))
565 return 0;
566
567 e = (struct ip6t_entry *)
568 (entry0 + newpos);
569 e->counters.pcnt = pos;
570 pos = newpos;
571 }
572 }
573 next:
574 duprintf("Finished chain %u\n", hook);
575 }
576 return 1;
577 }
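/* Sketch of the walk above on a tiny table (the offsets are made up):
 *
 *   pos 0    -j user_chain     jump: counters.pcnt of the target borrows
 *   pos 152  policy ACCEPT     the caller's offset as a back pointer
 *   pos 304  user_chain RETURN
 *
 * The jump at 0 is chased to 304; the unconditional RETURN there starts
 * the backtrack loop, which follows counters.pcnt back to 0, clears the
 * borrowed fields and resumes with the rule after the jump (152).
 * Hitting an entry whose comefrom still carries the 1 << NF_INET_NUMHOOKS
 * marker means the chain graph has a cycle, so 0 is returned and
 * translate_table() fails the whole replace with -ELOOP.
 */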
578
579 static void cleanup_match(struct xt_entry_match *m, struct net *net)
580 {
581 struct xt_mtdtor_param par;
582
583 par.net = net;
584 par.match = m->u.kernel.match;
585 par.matchinfo = m->data;
586 par.family = NFPROTO_IPV6;
587 if (par.match->destroy != NULL)
588 par.match->destroy(&par);
589 module_put(par.match->me);
590 }
591
592 static int
593 check_entry(const struct ip6t_entry *e)
594 {
595 const struct xt_entry_target *t;
596
597 if (!ip6_checkentry(&e->ipv6))
598 return -EINVAL;
599
600 if (e->target_offset + sizeof(struct xt_entry_target) >
601 e->next_offset)
602 return -EINVAL;
603
604 t = ip6t_get_target_c(e);
605 if (e->target_offset + t->u.target_size > e->next_offset)
606 return -EINVAL;
607
608 return 0;
609 }
610
611 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
612 {
613 const struct ip6t_ip6 *ipv6 = par->entryinfo;
614 int ret;
615
616 par->match = m->u.kernel.match;
617 par->matchinfo = m->data;
618
619 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
620 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
621 if (ret < 0) {
622 duprintf("ip_tables: check failed for `%s'.\n",
623 par->match->name);
624 return ret;
625 }
626 return 0;
627 }
628
629 static int
630 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
631 {
632 struct xt_match *match;
633 int ret;
634
635 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
636 m->u.user.revision);
637 if (IS_ERR(match)) {
638 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
639 return PTR_ERR(match);
640 }
641 m->u.kernel.match = match;
642
643 ret = check_match(m, par);
644 if (ret)
645 goto err;
646
647 return 0;
648 err:
649 module_put(m->u.kernel.match->me);
650 return ret;
651 }
652
653 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
654 {
655 struct xt_entry_target *t = ip6t_get_target(e);
656 struct xt_tgchk_param par = {
657 .net = net,
658 .table = name,
659 .entryinfo = e,
660 .target = t->u.kernel.target,
661 .targinfo = t->data,
662 .hook_mask = e->comefrom,
663 .family = NFPROTO_IPV6,
664 };
665 int ret;
666
668 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
669 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
670 if (ret < 0) {
671 duprintf("ip_tables: check failed for `%s'.\n",
672 t->u.kernel.target->name);
673 return ret;
674 }
675 return 0;
676 }
677
678 static int
679 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
680 unsigned int size)
681 {
682 struct xt_entry_target *t;
683 struct xt_target *target;
684 int ret;
685 unsigned int j;
686 struct xt_mtchk_param mtpar;
687 struct xt_entry_match *ematch;
688
689 e->counters.pcnt = xt_percpu_counter_alloc();
690 if (IS_ERR_VALUE(e->counters.pcnt))
691 return -ENOMEM;
692
693 j = 0;
694 mtpar.net = net;
695 mtpar.table = name;
696 mtpar.entryinfo = &e->ipv6;
697 mtpar.hook_mask = e->comefrom;
698 mtpar.family = NFPROTO_IPV6;
699 xt_ematch_foreach(ematch, e) {
700 ret = find_check_match(ematch, &mtpar);
701 if (ret != 0)
702 goto cleanup_matches;
703 ++j;
704 }
705
706 t = ip6t_get_target(e);
707 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
708 t->u.user.revision);
709 if (IS_ERR(target)) {
710 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
711 ret = PTR_ERR(target);
712 goto cleanup_matches;
713 }
714 t->u.kernel.target = target;
715
716 ret = check_target(e, net, name);
717 if (ret)
718 goto err;
719 return 0;
720 err:
721 module_put(t->u.kernel.target->me);
722 cleanup_matches:
723 xt_ematch_foreach(ematch, e) {
724 if (j-- == 0)
725 break;
726 cleanup_match(ematch, net);
727 }
728
729 xt_percpu_counter_free(e->counters.pcnt);
730
731 return ret;
732 }
733
734 static bool check_underflow(const struct ip6t_entry *e)
735 {
736 const struct xt_entry_target *t;
737 unsigned int verdict;
738
739 if (!unconditional(e))
740 return false;
741 t = ip6t_get_target_c(e);
742 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
743 return false;
744 verdict = ((struct xt_standard_target *)t)->verdict;
745 verdict = -verdict - 1;
746 return verdict == NF_DROP || verdict == NF_ACCEPT;
747 }
748
749 static int
750 check_entry_size_and_hooks(struct ip6t_entry *e,
751 struct xt_table_info *newinfo,
752 const unsigned char *base,
753 const unsigned char *limit,
754 const unsigned int *hook_entries,
755 const unsigned int *underflows,
756 unsigned int valid_hooks)
757 {
758 unsigned int h;
759 int err;
760
761 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
762 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
763 (unsigned char *)e + e->next_offset > limit) {
764 duprintf("Bad offset %p\n", e);
765 return -EINVAL;
766 }
767
768 if (e->next_offset
769 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
770 duprintf("checking: element %p size %u\n",
771 e, e->next_offset);
772 return -EINVAL;
773 }
774
775 err = check_entry(e);
776 if (err)
777 return err;
778
779 /* Check hooks & underflows */
780 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
781 if (!(valid_hooks & (1 << h)))
782 continue;
783 if ((unsigned char *)e - base == hook_entries[h])
784 newinfo->hook_entry[h] = hook_entries[h];
785 if ((unsigned char *)e - base == underflows[h]) {
786 if (!check_underflow(e)) {
787 pr_debug("Underflows must be unconditional and "
788 "use the STANDARD target with "
789 "ACCEPT/DROP\n");
790 return -EINVAL;
791 }
792 newinfo->underflow[h] = underflows[h];
793 }
794 }
795
796 /* Clear counters and comefrom */
797 e->counters = ((struct xt_counters) { 0, 0 });
798 e->comefrom = 0;
799 return 0;
800 }
801
802 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
803 {
804 struct xt_tgdtor_param par;
805 struct xt_entry_target *t;
806 struct xt_entry_match *ematch;
807
808 /* Cleanup all matches */
809 xt_ematch_foreach(ematch, e)
810 cleanup_match(ematch, net);
811 t = ip6t_get_target(e);
812
813 par.net = net;
814 par.target = t->u.kernel.target;
815 par.targinfo = t->data;
816 par.family = NFPROTO_IPV6;
817 if (par.target->destroy != NULL)
818 par.target->destroy(&par);
819 module_put(par.target->me);
820
821 xt_percpu_counter_free(e->counters.pcnt);
822 }
823
824 /* Checks and translates the user-supplied table segment (held in
825 newinfo) */
826 static int
827 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
828 const struct ip6t_replace *repl)
829 {
830 struct ip6t_entry *iter;
831 unsigned int i;
832 int ret = 0;
833
834 newinfo->size = repl->size;
835 newinfo->number = repl->num_entries;
836
837 /* Init all hooks to impossible value. */
838 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
839 newinfo->hook_entry[i] = 0xFFFFFFFF;
840 newinfo->underflow[i] = 0xFFFFFFFF;
841 }
842
843 duprintf("translate_table: size %u\n", newinfo->size);
844 i = 0;
845 /* Walk through entries, checking offsets. */
846 xt_entry_foreach(iter, entry0, newinfo->size) {
847 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
848 entry0 + repl->size,
849 repl->hook_entry,
850 repl->underflow,
851 repl->valid_hooks);
852 if (ret != 0)
853 return ret;
854 ++i;
855 if (strcmp(ip6t_get_target(iter)->u.user.name,
856 XT_ERROR_TARGET) == 0)
857 ++newinfo->stacksize;
858 }
859
860 if (i != repl->num_entries) {
861 duprintf("translate_table: %u not %u entries\n",
862 i, repl->num_entries);
863 return -EINVAL;
864 }
865
866 /* Check hooks all assigned */
867 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
868 /* Only hooks which are valid */
869 if (!(repl->valid_hooks & (1 << i)))
870 continue;
871 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
872 duprintf("Invalid hook entry %u %u\n",
873 i, repl->hook_entry[i]);
874 return -EINVAL;
875 }
876 if (newinfo->underflow[i] == 0xFFFFFFFF) {
877 duprintf("Invalid underflow %u %u\n",
878 i, repl->underflow[i]);
879 return -EINVAL;
880 }
881 }
882
883 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
884 return -ELOOP;
885
886 /* Finally, each sanity check must pass */
887 i = 0;
888 xt_entry_foreach(iter, entry0, newinfo->size) {
889 ret = find_check_entry(iter, net, repl->name, repl->size);
890 if (ret != 0)
891 break;
892 ++i;
893 }
894
895 if (ret != 0) {
896 xt_entry_foreach(iter, entry0, newinfo->size) {
897 if (i-- == 0)
898 break;
899 cleanup_entry(iter, net);
900 }
901 return ret;
902 }
903
904 return ret;
905 }
906
907 static void
908 get_counters(const struct xt_table_info *t,
909 struct xt_counters counters[])
910 {
911 struct ip6t_entry *iter;
912 unsigned int cpu;
913 unsigned int i;
914
915 for_each_possible_cpu(cpu) {
916 seqcount_t *s = &per_cpu(xt_recseq, cpu);
917
918 i = 0;
919 xt_entry_foreach(iter, t->entries, t->size) {
920 struct xt_counters *tmp;
921 u64 bcnt, pcnt;
922 unsigned int start;
923
924 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
925 do {
926 start = read_seqcount_begin(s);
927 bcnt = tmp->bcnt;
928 pcnt = tmp->pcnt;
929 } while (read_seqcount_retry(s, start));
930
931 ADD_COUNTER(counters[i], bcnt, pcnt);
932 ++i;
933 }
934 }
935 }
936
937 static struct xt_counters *alloc_counters(const struct xt_table *table)
938 {
939 unsigned int countersize;
940 struct xt_counters *counters;
941 const struct xt_table_info *private = table->private;
942
943 /* We need an atomic snapshot of the counters: the rest doesn't change
944 (other than comefrom, which userspace doesn't care
945 about). */
946 countersize = sizeof(struct xt_counters) * private->number;
947 counters = vzalloc(countersize);
948
949 if (counters == NULL)
950 return ERR_PTR(-ENOMEM);
951
952 get_counters(private, counters);
953
954 return counters;
955 }
956
957 static int
958 copy_entries_to_user(unsigned int total_size,
959 const struct xt_table *table,
960 void __user *userptr)
961 {
962 unsigned int off, num;
963 const struct ip6t_entry *e;
964 struct xt_counters *counters;
965 const struct xt_table_info *private = table->private;
966 int ret = 0;
967 const void *loc_cpu_entry;
968
969 counters = alloc_counters(table);
970 if (IS_ERR(counters))
971 return PTR_ERR(counters);
972
973 loc_cpu_entry = private->entries;
974 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
975 ret = -EFAULT;
976 goto free_counters;
977 }
978
979 /* FIXME: use iterator macros --RR */
980 /* ... then go back and fix counters and names */
981 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
982 unsigned int i;
983 const struct xt_entry_match *m;
984 const struct xt_entry_target *t;
985
986 e = (struct ip6t_entry *)(loc_cpu_entry + off);
987 if (copy_to_user(userptr + off
988 + offsetof(struct ip6t_entry, counters),
989 &counters[num],
990 sizeof(counters[num])) != 0) {
991 ret = -EFAULT;
992 goto free_counters;
993 }
994
995 for (i = sizeof(struct ip6t_entry);
996 i < e->target_offset;
997 i += m->u.match_size) {
998 m = (void *)e + i;
999
1000 if (copy_to_user(userptr + off + i
1001 + offsetof(struct xt_entry_match,
1002 u.user.name),
1003 m->u.kernel.match->name,
1004 strlen(m->u.kernel.match->name)+1)
1005 != 0) {
1006 ret = -EFAULT;
1007 goto free_counters;
1008 }
1009 }
1010
1011 t = ip6t_get_target_c(e);
1012 if (copy_to_user(userptr + off + e->target_offset
1013 + offsetof(struct xt_entry_target,
1014 u.user.name),
1015 t->u.kernel.target->name,
1016 strlen(t->u.kernel.target->name)+1) != 0) {
1017 ret = -EFAULT;
1018 goto free_counters;
1019 }
1020 }
1021
1022 free_counters:
1023 vfree(counters);
1024 return ret;
1025 }
1026
1027 #ifdef CONFIG_COMPAT
1028 static void compat_standard_from_user(void *dst, const void *src)
1029 {
1030 int v = *(compat_int_t *)src;
1031
1032 if (v > 0)
1033 v += xt_compat_calc_jump(AF_INET6, v);
1034 memcpy(dst, &v, sizeof(v));
1035 }
1036
1037 static int compat_standard_to_user(void __user *dst, const void *src)
1038 {
1039 compat_int_t cv = *(int *)src;
1040
1041 if (cv > 0)
1042 cv -= xt_compat_calc_jump(AF_INET6, cv);
1043 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1044 }
1045
1046 static int compat_calc_entry(const struct ip6t_entry *e,
1047 const struct xt_table_info *info,
1048 const void *base, struct xt_table_info *newinfo)
1049 {
1050 const struct xt_entry_match *ematch;
1051 const struct xt_entry_target *t;
1052 unsigned int entry_offset;
1053 int off, i, ret;
1054
1055 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1056 entry_offset = (void *)e - base;
1057 xt_ematch_foreach(ematch, e)
1058 off += xt_compat_match_offset(ematch->u.kernel.match);
1059 t = ip6t_get_target_c(e);
1060 off += xt_compat_target_offset(t->u.kernel.target);
1061 newinfo->size -= off;
1062 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1063 if (ret)
1064 return ret;
1065
1066 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1067 if (info->hook_entry[i] &&
1068 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1069 newinfo->hook_entry[i] -= off;
1070 if (info->underflow[i] &&
1071 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1072 newinfo->underflow[i] -= off;
1073 }
1074 return 0;
1075 }
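/* Illustration of the bookkeeping above, with hypothetical sizes (the
 * real deltas depend on the architecture and on each extension): if
 * sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry) were 8
 * and the rule carried one match with an xt_compat_match_offset() of 4,
 * then off would be 12 plus the target's delta.  Every rule lying in
 * front of a hook entry point or underflow shrinks that offset by its
 * own "off", which is what the NF_INET_NUMHOOKS loop does, and
 * xt_compat_add_offset() records the per-entry delta so that standard
 * target jumps can be translated the same way by xt_compat_calc_jump()
 * in compat_standard_from_user()/compat_standard_to_user() above.
 */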
1076
1077 static int compat_table_info(const struct xt_table_info *info,
1078 struct xt_table_info *newinfo)
1079 {
1080 struct ip6t_entry *iter;
1081 const void *loc_cpu_entry;
1082 int ret;
1083
1084 if (!newinfo || !info)
1085 return -EINVAL;
1086
1087 /* we don't care about newinfo->entries */
1088 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1089 newinfo->initial_entries = 0;
1090 loc_cpu_entry = info->entries;
1091 xt_compat_init_offsets(AF_INET6, info->number);
1092 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1093 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1094 if (ret != 0)
1095 return ret;
1096 }
1097 return 0;
1098 }
1099 #endif
1100
1101 static int get_info(struct net *net, void __user *user,
1102 const int *len, int compat)
1103 {
1104 char name[XT_TABLE_MAXNAMELEN];
1105 struct xt_table *t;
1106 int ret;
1107
1108 if (*len != sizeof(struct ip6t_getinfo)) {
1109 duprintf("length %u != %zu\n", *len,
1110 sizeof(struct ip6t_getinfo));
1111 return -EINVAL;
1112 }
1113
1114 if (copy_from_user(name, user, sizeof(name)) != 0)
1115 return -EFAULT;
1116
1117 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1118 #ifdef CONFIG_COMPAT
1119 if (compat)
1120 xt_compat_lock(AF_INET6);
1121 #endif
1122 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1123 "ip6table_%s", name);
1124 if (!IS_ERR_OR_NULL(t)) {
1125 struct ip6t_getinfo info;
1126 const struct xt_table_info *private = t->private;
1127 #ifdef CONFIG_COMPAT
1128 struct xt_table_info tmp;
1129
1130 if (compat) {
1131 ret = compat_table_info(private, &tmp);
1132 xt_compat_flush_offsets(AF_INET6);
1133 private = &tmp;
1134 }
1135 #endif
1136 memset(&info, 0, sizeof(info));
1137 info.valid_hooks = t->valid_hooks;
1138 memcpy(info.hook_entry, private->hook_entry,
1139 sizeof(info.hook_entry));
1140 memcpy(info.underflow, private->underflow,
1141 sizeof(info.underflow));
1142 info.num_entries = private->number;
1143 info.size = private->size;
1144 strcpy(info.name, name);
1145
1146 if (copy_to_user(user, &info, *len) != 0)
1147 ret = -EFAULT;
1148 else
1149 ret = 0;
1150
1151 xt_table_unlock(t);
1152 module_put(t->me);
1153 } else
1154 ret = t ? PTR_ERR(t) : -ENOENT;
1155 #ifdef CONFIG_COMPAT
1156 if (compat)
1157 xt_compat_unlock(AF_INET6);
1158 #endif
1159 return ret;
1160 }
1161
1162 static int
1163 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1164 const int *len)
1165 {
1166 int ret;
1167 struct ip6t_get_entries get;
1168 struct xt_table *t;
1169
1170 if (*len < sizeof(get)) {
1171 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1172 return -EINVAL;
1173 }
1174 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1175 return -EFAULT;
1176 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1177 duprintf("get_entries: %u != %zu\n",
1178 *len, sizeof(get) + get.size);
1179 return -EINVAL;
1180 }
1181
1182 t = xt_find_table_lock(net, AF_INET6, get.name);
1183 if (!IS_ERR_OR_NULL(t)) {
1184 struct xt_table_info *private = t->private;
1185 duprintf("t->private->number = %u\n", private->number);
1186 if (get.size == private->size)
1187 ret = copy_entries_to_user(private->size,
1188 t, uptr->entrytable);
1189 else {
1190 duprintf("get_entries: I've got %u not %u!\n",
1191 private->size, get.size);
1192 ret = -EAGAIN;
1193 }
1194 module_put(t->me);
1195 xt_table_unlock(t);
1196 } else
1197 ret = t ? PTR_ERR(t) : -ENOENT;
1198
1199 return ret;
1200 }
1201
1202 static int
1203 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1204 struct xt_table_info *newinfo, unsigned int num_counters,
1205 void __user *counters_ptr)
1206 {
1207 int ret;
1208 struct xt_table *t;
1209 struct xt_table_info *oldinfo;
1210 struct xt_counters *counters;
1211 struct ip6t_entry *iter;
1212
1213 ret = 0;
1214 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1215 if (!counters) {
1216 ret = -ENOMEM;
1217 goto out;
1218 }
1219
1220 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1221 "ip6table_%s", name);
1222 if (IS_ERR_OR_NULL(t)) {
1223 ret = t ? PTR_ERR(t) : -ENOENT;
1224 goto free_newinfo_counters_untrans;
1225 }
1226
1227 /* You lied! */
1228 if (valid_hooks != t->valid_hooks) {
1229 duprintf("Valid hook crap: %08X vs %08X\n",
1230 valid_hooks, t->valid_hooks);
1231 ret = -EINVAL;
1232 goto put_module;
1233 }
1234
1235 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1236 if (!oldinfo)
1237 goto put_module;
1238
1239 /* Update module usage count based on number of rules */
1240 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1241 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1242 if ((oldinfo->number > oldinfo->initial_entries) ||
1243 (newinfo->number <= oldinfo->initial_entries))
1244 module_put(t->me);
1245 if ((oldinfo->number > oldinfo->initial_entries) &&
1246 (newinfo->number <= oldinfo->initial_entries))
1247 module_put(t->me);
1248
1249 /* Get the old counters, and synchronize with replace */
1250 get_counters(oldinfo, counters);
1251
1252 /* Decrease module usage counts and free resource */
1253 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1254 cleanup_entry(iter, net);
1255
1256 xt_free_table_info(oldinfo);
1257 if (copy_to_user(counters_ptr, counters,
1258 sizeof(struct xt_counters) * num_counters) != 0) {
1259 /* Silent error, can't fail, new table is already in place */
1260 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1261 }
1262 vfree(counters);
1263 xt_table_unlock(t);
1264 return ret;
1265
1266 put_module:
1267 module_put(t->me);
1268 xt_table_unlock(t);
1269 free_newinfo_counters_untrans:
1270 vfree(counters);
1271 out:
1272 return ret;
1273 }
1274
1275 static int
1276 do_replace(struct net *net, const void __user *user, unsigned int len)
1277 {
1278 int ret;
1279 struct ip6t_replace tmp;
1280 struct xt_table_info *newinfo;
1281 void *loc_cpu_entry;
1282 struct ip6t_entry *iter;
1283
1284 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1285 return -EFAULT;
1286
1287 /* overflow check */
1288 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1289 return -ENOMEM;
1290 if (tmp.num_counters == 0)
1291 return -EINVAL;
1292
1293 tmp.name[sizeof(tmp.name)-1] = 0;
1294
1295 newinfo = xt_alloc_table_info(tmp.size);
1296 if (!newinfo)
1297 return -ENOMEM;
1298
1299 loc_cpu_entry = newinfo->entries;
1300 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1301 tmp.size) != 0) {
1302 ret = -EFAULT;
1303 goto free_newinfo;
1304 }
1305
1306 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1307 if (ret != 0)
1308 goto free_newinfo;
1309
1310 duprintf("ip_tables: Translated table\n");
1311
1312 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1313 tmp.num_counters, tmp.counters);
1314 if (ret)
1315 goto free_newinfo_untrans;
1316 return 0;
1317
1318 free_newinfo_untrans:
1319 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1320 cleanup_entry(iter, net);
1321 free_newinfo:
1322 xt_free_table_info(newinfo);
1323 return ret;
1324 }
1325
1326 static int
1327 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1328 int compat)
1329 {
1330 unsigned int i;
1331 struct xt_counters_info tmp;
1332 struct xt_counters *paddc;
1333 unsigned int num_counters;
1334 char *name;
1335 int size;
1336 void *ptmp;
1337 struct xt_table *t;
1338 const struct xt_table_info *private;
1339 int ret = 0;
1340 struct ip6t_entry *iter;
1341 unsigned int addend;
1342 #ifdef CONFIG_COMPAT
1343 struct compat_xt_counters_info compat_tmp;
1344
1345 if (compat) {
1346 ptmp = &compat_tmp;
1347 size = sizeof(struct compat_xt_counters_info);
1348 } else
1349 #endif
1350 {
1351 ptmp = &tmp;
1352 size = sizeof(struct xt_counters_info);
1353 }
1354
1355 if (copy_from_user(ptmp, user, size) != 0)
1356 return -EFAULT;
1357
1358 #ifdef CONFIG_COMPAT
1359 if (compat) {
1360 num_counters = compat_tmp.num_counters;
1361 name = compat_tmp.name;
1362 } else
1363 #endif
1364 {
1365 num_counters = tmp.num_counters;
1366 name = tmp.name;
1367 }
1368
1369 if (len != size + num_counters * sizeof(struct xt_counters))
1370 return -EINVAL;
1371
1372 paddc = vmalloc(len - size);
1373 if (!paddc)
1374 return -ENOMEM;
1375
1376 if (copy_from_user(paddc, user + size, len - size) != 0) {
1377 ret = -EFAULT;
1378 goto free;
1379 }
1380
1381 t = xt_find_table_lock(net, AF_INET6, name);
1382 if (IS_ERR_OR_NULL(t)) {
1383 ret = t ? PTR_ERR(t) : -ENOENT;
1384 goto free;
1385 }
1386
1387 local_bh_disable();
1388 private = t->private;
1389 if (private->number != num_counters) {
1390 ret = -EINVAL;
1391 goto unlock_up_free;
1392 }
1393
1394 i = 0;
1395 addend = xt_write_recseq_begin();
1396 xt_entry_foreach(iter, private->entries, private->size) {
1397 struct xt_counters *tmp;
1398
1399 tmp = xt_get_this_cpu_counter(&iter->counters);
1400 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1401 ++i;
1402 }
1403 xt_write_recseq_end(addend);
1404 unlock_up_free:
1405 local_bh_enable();
1406 xt_table_unlock(t);
1407 module_put(t->me);
1408 free:
1409 vfree(paddc);
1410
1411 return ret;
1412 }
1413
1414 #ifdef CONFIG_COMPAT
1415 struct compat_ip6t_replace {
1416 char name[XT_TABLE_MAXNAMELEN];
1417 u32 valid_hooks;
1418 u32 num_entries;
1419 u32 size;
1420 u32 hook_entry[NF_INET_NUMHOOKS];
1421 u32 underflow[NF_INET_NUMHOOKS];
1422 u32 num_counters;
1423 compat_uptr_t counters; /* struct xt_counters * */
1424 struct compat_ip6t_entry entries[0];
1425 };
1426
1427 static int
1428 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1429 unsigned int *size, struct xt_counters *counters,
1430 unsigned int i)
1431 {
1432 struct xt_entry_target *t;
1433 struct compat_ip6t_entry __user *ce;
1434 u_int16_t target_offset, next_offset;
1435 compat_uint_t origsize;
1436 const struct xt_entry_match *ematch;
1437 int ret = 0;
1438
1439 origsize = *size;
1440 ce = (struct compat_ip6t_entry __user *)*dstptr;
1441 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1442 copy_to_user(&ce->counters, &counters[i],
1443 sizeof(counters[i])) != 0)
1444 return -EFAULT;
1445
1446 *dstptr += sizeof(struct compat_ip6t_entry);
1447 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1448
1449 xt_ematch_foreach(ematch, e) {
1450 ret = xt_compat_match_to_user(ematch, dstptr, size);
1451 if (ret != 0)
1452 return ret;
1453 }
1454 target_offset = e->target_offset - (origsize - *size);
1455 t = ip6t_get_target(e);
1456 ret = xt_compat_target_to_user(t, dstptr, size);
1457 if (ret)
1458 return ret;
1459 next_offset = e->next_offset - (origsize - *size);
1460 if (put_user(target_offset, &ce->target_offset) != 0 ||
1461 put_user(next_offset, &ce->next_offset) != 0)
1462 return -EFAULT;
1463 return 0;
1464 }
1465
1466 static int
1467 compat_find_calc_match(struct xt_entry_match *m,
1468 const char *name,
1469 const struct ip6t_ip6 *ipv6,
1470 int *size)
1471 {
1472 struct xt_match *match;
1473
1474 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1475 m->u.user.revision);
1476 if (IS_ERR(match)) {
1477 duprintf("compat_find_calc_match: `%s' not found\n",
1478 m->u.user.name);
1479 return PTR_ERR(match);
1480 }
1481 m->u.kernel.match = match;
1482 *size += xt_compat_match_offset(match);
1483 return 0;
1484 }
1485
1486 static void compat_release_entry(struct compat_ip6t_entry *e)
1487 {
1488 struct xt_entry_target *t;
1489 struct xt_entry_match *ematch;
1490
1491 /* Cleanup all matches */
1492 xt_ematch_foreach(ematch, e)
1493 module_put(ematch->u.kernel.match->me);
1494 t = compat_ip6t_get_target(e);
1495 module_put(t->u.kernel.target->me);
1496 }
1497
1498 static int
1499 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1500 struct xt_table_info *newinfo,
1501 unsigned int *size,
1502 const unsigned char *base,
1503 const unsigned char *limit,
1504 const unsigned int *hook_entries,
1505 const unsigned int *underflows,
1506 const char *name)
1507 {
1508 struct xt_entry_match *ematch;
1509 struct xt_entry_target *t;
1510 struct xt_target *target;
1511 unsigned int entry_offset;
1512 unsigned int j;
1513 int ret, off, h;
1514
1515 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1516 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1517 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1518 (unsigned char *)e + e->next_offset > limit) {
1519 duprintf("Bad offset %p, limit = %p\n", e, limit);
1520 return -EINVAL;
1521 }
1522
1523 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1524 sizeof(struct compat_xt_entry_target)) {
1525 duprintf("checking: element %p size %u\n",
1526 e, e->next_offset);
1527 return -EINVAL;
1528 }
1529
1530 /* For purposes of check_entry casting the compat entry is fine */
1531 ret = check_entry((struct ip6t_entry *)e);
1532 if (ret)
1533 return ret;
1534
1535 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1536 entry_offset = (void *)e - (void *)base;
1537 j = 0;
1538 xt_ematch_foreach(ematch, e) {
1539 ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
1540 if (ret != 0)
1541 goto release_matches;
1542 ++j;
1543 }
1544
1545 t = compat_ip6t_get_target(e);
1546 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1547 t->u.user.revision);
1548 if (IS_ERR(target)) {
1549 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1550 t->u.user.name);
1551 ret = PTR_ERR(target);
1552 goto release_matches;
1553 }
1554 t->u.kernel.target = target;
1555
1556 off += xt_compat_target_offset(target);
1557 *size += off;
1558 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1559 if (ret)
1560 goto out;
1561
1562 /* Check hooks & underflows */
1563 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1564 if ((unsigned char *)e - base == hook_entries[h])
1565 newinfo->hook_entry[h] = hook_entries[h];
1566 if ((unsigned char *)e - base == underflows[h])
1567 newinfo->underflow[h] = underflows[h];
1568 }
1569
1570 /* Clear counters and comefrom */
1571 memset(&e->counters, 0, sizeof(e->counters));
1572 e->comefrom = 0;
1573 return 0;
1574
1575 out:
1576 module_put(t->u.kernel.target->me);
1577 release_matches:
1578 xt_ematch_foreach(ematch, e) {
1579 if (j-- == 0)
1580 break;
1581 module_put(ematch->u.kernel.match->me);
1582 }
1583 return ret;
1584 }
1585
1586 static int
1587 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1588 unsigned int *size, const char *name,
1589 struct xt_table_info *newinfo, unsigned char *base)
1590 {
1591 struct xt_entry_target *t;
1592 struct ip6t_entry *de;
1593 unsigned int origsize;
1594 int ret, h;
1595 struct xt_entry_match *ematch;
1596
1597 ret = 0;
1598 origsize = *size;
1599 de = (struct ip6t_entry *)*dstptr;
1600 memcpy(de, e, sizeof(struct ip6t_entry));
1601 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1602
1603 *dstptr += sizeof(struct ip6t_entry);
1604 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1605
1606 xt_ematch_foreach(ematch, e) {
1607 ret = xt_compat_match_from_user(ematch, dstptr, size);
1608 if (ret != 0)
1609 return ret;
1610 }
1611 de->target_offset = e->target_offset - (origsize - *size);
1612 t = compat_ip6t_get_target(e);
1613 xt_compat_target_from_user(t, dstptr, size);
1614
1615 de->next_offset = e->next_offset - (origsize - *size);
1616 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1617 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1618 newinfo->hook_entry[h] -= origsize - *size;
1619 if ((unsigned char *)de - base < newinfo->underflow[h])
1620 newinfo->underflow[h] -= origsize - *size;
1621 }
1622 return ret;
1623 }
1624
1625 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1626 const char *name)
1627 {
1628 unsigned int j;
1629 int ret = 0;
1630 struct xt_mtchk_param mtpar;
1631 struct xt_entry_match *ematch;
1632
1633 e->counters.pcnt = xt_percpu_counter_alloc();
1634 if (IS_ERR_VALUE(e->counters.pcnt))
1635 return -ENOMEM;
1636 j = 0;
1637 mtpar.net = net;
1638 mtpar.table = name;
1639 mtpar.entryinfo = &e->ipv6;
1640 mtpar.hook_mask = e->comefrom;
1641 mtpar.family = NFPROTO_IPV6;
1642 xt_ematch_foreach(ematch, e) {
1643 ret = check_match(ematch, &mtpar);
1644 if (ret != 0)
1645 goto cleanup_matches;
1646 ++j;
1647 }
1648
1649 ret = check_target(e, net, name);
1650 if (ret)
1651 goto cleanup_matches;
1652 return 0;
1653
1654 cleanup_matches:
1655 xt_ematch_foreach(ematch, e) {
1656 if (j-- == 0)
1657 break;
1658 cleanup_match(ematch, net);
1659 }
1660
1661 xt_percpu_counter_free(e->counters.pcnt);
1662
1663 return ret;
1664 }
1665
1666 static int
1667 translate_compat_table(struct net *net,
1668 const char *name,
1669 unsigned int valid_hooks,
1670 struct xt_table_info **pinfo,
1671 void **pentry0,
1672 unsigned int total_size,
1673 unsigned int number,
1674 unsigned int *hook_entries,
1675 unsigned int *underflows)
1676 {
1677 unsigned int i, j;
1678 struct xt_table_info *newinfo, *info;
1679 void *pos, *entry0, *entry1;
1680 struct compat_ip6t_entry *iter0;
1681 struct ip6t_entry *iter1;
1682 unsigned int size;
1683 int ret = 0;
1684
1685 info = *pinfo;
1686 entry0 = *pentry0;
1687 size = total_size;
1688 info->number = number;
1689
1690 /* Init all hooks to impossible value. */
1691 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1692 info->hook_entry[i] = 0xFFFFFFFF;
1693 info->underflow[i] = 0xFFFFFFFF;
1694 }
1695
1696 duprintf("translate_compat_table: size %u\n", info->size);
1697 j = 0;
1698 xt_compat_lock(AF_INET6);
1699 xt_compat_init_offsets(AF_INET6, number);
1700 /* Walk through entries, checking offsets. */
1701 xt_entry_foreach(iter0, entry0, total_size) {
1702 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1703 entry0,
1704 entry0 + total_size,
1705 hook_entries,
1706 underflows,
1707 name);
1708 if (ret != 0)
1709 goto out_unlock;
1710 ++j;
1711 }
1712
1713 ret = -EINVAL;
1714 if (j != number) {
1715 duprintf("translate_compat_table: %u not %u entries\n",
1716 j, number);
1717 goto out_unlock;
1718 }
1719
1720 /* Check hooks all assigned */
1721 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1722 /* Only hooks which are valid */
1723 if (!(valid_hooks & (1 << i)))
1724 continue;
1725 if (info->hook_entry[i] == 0xFFFFFFFF) {
1726 duprintf("Invalid hook entry %u %u\n",
1727 i, hook_entries[i]);
1728 goto out_unlock;
1729 }
1730 if (info->underflow[i] == 0xFFFFFFFF) {
1731 duprintf("Invalid underflow %u %u\n",
1732 i, underflows[i]);
1733 goto out_unlock;
1734 }
1735 }
1736
1737 ret = -ENOMEM;
1738 newinfo = xt_alloc_table_info(size);
1739 if (!newinfo)
1740 goto out_unlock;
1741
1742 newinfo->number = number;
1743 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1744 newinfo->hook_entry[i] = info->hook_entry[i];
1745 newinfo->underflow[i] = info->underflow[i];
1746 }
1747 entry1 = newinfo->entries;
1748 pos = entry1;
1749 size = total_size;
1750 xt_entry_foreach(iter0, entry0, total_size) {
1751 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1752 name, newinfo, entry1);
1753 if (ret != 0)
1754 break;
1755 }
1756 xt_compat_flush_offsets(AF_INET6);
1757 xt_compat_unlock(AF_INET6);
1758 if (ret)
1759 goto free_newinfo;
1760
1761 ret = -ELOOP;
1762 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1763 goto free_newinfo;
1764
1765 i = 0;
1766 xt_entry_foreach(iter1, entry1, newinfo->size) {
1767 ret = compat_check_entry(iter1, net, name);
1768 if (ret != 0)
1769 break;
1770 ++i;
1771 if (strcmp(ip6t_get_target(iter1)->u.user.name,
1772 XT_ERROR_TARGET) == 0)
1773 ++newinfo->stacksize;
1774 }
1775 if (ret) {
1776 /*
1777 * The first i matches need cleanup_entry (calls ->destroy)
1778 * because they had called ->check already. The other j-i
1779 * entries need only release.
1780 */
1781 int skip = i;
1782 j -= i;
1783 xt_entry_foreach(iter0, entry0, newinfo->size) {
1784 if (skip-- > 0)
1785 continue;
1786 if (j-- == 0)
1787 break;
1788 compat_release_entry(iter0);
1789 }
1790 xt_entry_foreach(iter1, entry1, newinfo->size) {
1791 if (i-- == 0)
1792 break;
1793 cleanup_entry(iter1, net);
1794 }
1795 xt_free_table_info(newinfo);
1796 return ret;
1797 }
1798
1799 *pinfo = newinfo;
1800 *pentry0 = entry1;
1801 xt_free_table_info(info);
1802 return 0;
1803
1804 free_newinfo:
1805 xt_free_table_info(newinfo);
1806 out:
1807 xt_entry_foreach(iter0, entry0, total_size) {
1808 if (j-- == 0)
1809 break;
1810 compat_release_entry(iter0);
1811 }
1812 return ret;
1813 out_unlock:
1814 xt_compat_flush_offsets(AF_INET6);
1815 xt_compat_unlock(AF_INET6);
1816 goto out;
1817 }
1818
1819 static int
1820 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1821 {
1822 int ret;
1823 struct compat_ip6t_replace tmp;
1824 struct xt_table_info *newinfo;
1825 void *loc_cpu_entry;
1826 struct ip6t_entry *iter;
1827
1828 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1829 return -EFAULT;
1830
1831 /* overflow check */
1832 if (tmp.size >= INT_MAX / num_possible_cpus())
1833 return -ENOMEM;
1834 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1835 return -ENOMEM;
1836 if (tmp.num_counters == 0)
1837 return -EINVAL;
1838
1839 tmp.name[sizeof(tmp.name)-1] = 0;
1840
1841 newinfo = xt_alloc_table_info(tmp.size);
1842 if (!newinfo)
1843 return -ENOMEM;
1844
1845 loc_cpu_entry = newinfo->entries;
1846 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1847 tmp.size) != 0) {
1848 ret = -EFAULT;
1849 goto free_newinfo;
1850 }
1851
1852 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1853 &newinfo, &loc_cpu_entry, tmp.size,
1854 tmp.num_entries, tmp.hook_entry,
1855 tmp.underflow);
1856 if (ret != 0)
1857 goto free_newinfo;
1858
1859 duprintf("compat_do_replace: Translated table\n");
1860
1861 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1862 tmp.num_counters, compat_ptr(tmp.counters));
1863 if (ret)
1864 goto free_newinfo_untrans;
1865 return 0;
1866
1867 free_newinfo_untrans:
1868 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1869 cleanup_entry(iter, net);
1870 free_newinfo:
1871 xt_free_table_info(newinfo);
1872 return ret;
1873 }
1874
1875 static int
1876 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1877 unsigned int len)
1878 {
1879 int ret;
1880
1881 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1882 return -EPERM;
1883
1884 switch (cmd) {
1885 case IP6T_SO_SET_REPLACE:
1886 ret = compat_do_replace(sock_net(sk), user, len);
1887 break;
1888
1889 case IP6T_SO_SET_ADD_COUNTERS:
1890 ret = do_add_counters(sock_net(sk), user, len, 1);
1891 break;
1892
1893 default:
1894 duprintf("compat_do_ip6t_set_ctl: unknown request %i\n", cmd);
1895 ret = -EINVAL;
1896 }
1897
1898 return ret;
1899 }
1900
1901 struct compat_ip6t_get_entries {
1902 char name[XT_TABLE_MAXNAMELEN];
1903 compat_uint_t size;
1904 struct compat_ip6t_entry entrytable[0];
1905 };
1906
1907 static int
1908 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1909 void __user *userptr)
1910 {
1911 struct xt_counters *counters;
1912 const struct xt_table_info *private = table->private;
1913 void __user *pos;
1914 unsigned int size;
1915 int ret = 0;
1916 unsigned int i = 0;
1917 struct ip6t_entry *iter;
1918
1919 counters = alloc_counters(table);
1920 if (IS_ERR(counters))
1921 return PTR_ERR(counters);
1922
1923 pos = userptr;
1924 size = total_size;
1925 xt_entry_foreach(iter, private->entries, total_size) {
1926 ret = compat_copy_entry_to_user(iter, &pos,
1927 &size, counters, i++);
1928 if (ret != 0)
1929 break;
1930 }
1931
1932 vfree(counters);
1933 return ret;
1934 }
1935
1936 static int
1937 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1938 int *len)
1939 {
1940 int ret;
1941 struct compat_ip6t_get_entries get;
1942 struct xt_table *t;
1943
1944 if (*len < sizeof(get)) {
1945 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1946 return -EINVAL;
1947 }
1948
1949 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1950 return -EFAULT;
1951
1952 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1953 duprintf("compat_get_entries: %u != %zu\n",
1954 *len, sizeof(get) + get.size);
1955 return -EINVAL;
1956 }
1957
1958 xt_compat_lock(AF_INET6);
1959 t = xt_find_table_lock(net, AF_INET6, get.name);
1960 if (!IS_ERR_OR_NULL(t)) {
1961 const struct xt_table_info *private = t->private;
1962 struct xt_table_info info;
1963 duprintf("t->private->number = %u\n", private->number);
1964 ret = compat_table_info(private, &info);
1965 if (!ret && get.size == info.size) {
1966 ret = compat_copy_entries_to_user(private->size,
1967 t, uptr->entrytable);
1968 } else if (!ret) {
1969 duprintf("compat_get_entries: I've got %u not %u!\n",
1970 private->size, get.size);
1971 ret = -EAGAIN;
1972 }
1973 xt_compat_flush_offsets(AF_INET6);
1974 module_put(t->me);
1975 xt_table_unlock(t);
1976 } else
1977 ret = t ? PTR_ERR(t) : -ENOENT;
1978
1979 xt_compat_unlock(AF_INET6);
1980 return ret;
1981 }
1982
1983 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1984
1985 static int
1986 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1987 {
1988 int ret;
1989
1990 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1991 return -EPERM;
1992
1993 switch (cmd) {
1994 case IP6T_SO_GET_INFO:
1995 ret = get_info(sock_net(sk), user, len, 1);
1996 break;
1997 case IP6T_SO_GET_ENTRIES:
1998 ret = compat_get_entries(sock_net(sk), user, len);
1999 break;
2000 default:
2001 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2002 }
2003 return ret;
2004 }
2005 #endif
2006
2007 static int
2008 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2009 {
2010 int ret;
2011
2012 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2013 return -EPERM;
2014
2015 switch (cmd) {
2016 case IP6T_SO_SET_REPLACE:
2017 ret = do_replace(sock_net(sk), user, len);
2018 break;
2019
2020 case IP6T_SO_SET_ADD_COUNTERS:
2021 ret = do_add_counters(sock_net(sk), user, len, 0);
2022 break;
2023
2024 default:
2025 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2026 ret = -EINVAL;
2027 }
2028
2029 return ret;
2030 }
2031
2032 static int
2033 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2034 {
2035 int ret;
2036
2037 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2038 return -EPERM;
2039
2040 switch (cmd) {
2041 case IP6T_SO_GET_INFO:
2042 ret = get_info(sock_net(sk), user, len, 0);
2043 break;
2044
2045 case IP6T_SO_GET_ENTRIES:
2046 ret = get_entries(sock_net(sk), user, len);
2047 break;
2048
2049 case IP6T_SO_GET_REVISION_MATCH:
2050 case IP6T_SO_GET_REVISION_TARGET: {
2051 struct xt_get_revision rev;
2052 int target;
2053
2054 if (*len != sizeof(rev)) {
2055 ret = -EINVAL;
2056 break;
2057 }
2058 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2059 ret = -EFAULT;
2060 break;
2061 }
2062 rev.name[sizeof(rev.name)-1] = 0;
2063
2064 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2065 target = 1;
2066 else
2067 target = 0;
2068
2069 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2070 rev.revision,
2071 target, &ret),
2072 "ip6t_%s", rev.name);
2073 break;
2074 }
2075
2076 default:
2077 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2078 ret = -EINVAL;
2079 }
2080
2081 return ret;
2082 }
2083
2084 struct xt_table *ip6t_register_table(struct net *net,
2085 const struct xt_table *table,
2086 const struct ip6t_replace *repl)
2087 {
2088 int ret;
2089 struct xt_table_info *newinfo;
2090 struct xt_table_info bootstrap = {0};
2091 void *loc_cpu_entry;
2092 struct xt_table *new_table;
2093
2094 newinfo = xt_alloc_table_info(repl->size);
2095 if (!newinfo) {
2096 ret = -ENOMEM;
2097 goto out;
2098 }
2099
2100 loc_cpu_entry = newinfo->entries;
2101 memcpy(loc_cpu_entry, repl->entries, repl->size);
2102
2103 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2104 if (ret != 0)
2105 goto out_free;
2106
2107 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2108 if (IS_ERR(new_table)) {
2109 ret = PTR_ERR(new_table);
2110 goto out_free;
2111 }
2112 return new_table;
2113
2114 out_free:
2115 xt_free_table_info(newinfo);
2116 out:
2117 return ERR_PTR(ret);
2118 }
2119
2120 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2121 {
2122 struct xt_table_info *private;
2123 void *loc_cpu_entry;
2124 struct module *table_owner = table->me;
2125 struct ip6t_entry *iter;
2126
2127 private = xt_unregister_table(table);
2128
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry = private->entries;
2131 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2132 cleanup_entry(iter, net);
2133 if (private->number > private->initial_entries)
2134 module_put(table_owner);
2135 xt_free_table_info(private);
2136 }
2137
2138 /* Returns true if the type and code are matched by the range, false otherwise */
2139 static inline bool
2140 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2141 u_int8_t type, u_int8_t code,
2142 bool invert)
2143 {
2144 return (type == test_type && code >= min_code && code <= max_code)
2145 ^ invert;
2146 }
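/* Worked example, using the encoding ip6tables userspace typically
 * produces (not defined in this file): "--icmpv6-type 128" (echo
 * request) is stored as type = 128, code[0] = 0, code[1] = 0xFF, so any
 * code matches; "--icmpv6-type 1/4" gives code[0] = code[1] = 4.  With
 * IP6T_ICMP_INV set, the XOR above simply inverts the result.
 */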
2147
2148 static bool
2149 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2150 {
2151 const struct icmp6hdr *ic;
2152 struct icmp6hdr _icmph;
2153 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2154
2155 /* Must not be a fragment. */
2156 if (par->fragoff != 0)
2157 return false;
2158
2159 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2160 if (ic == NULL) {
2161 /* We've been asked to examine this packet, and we
2162 * can't. Hence, no choice but to drop.
2163 */
2164 duprintf("Dropping evil ICMP tinygram.\n");
2165 par->hotdrop = true;
2166 return false;
2167 }
2168
2169 return icmp6_type_code_match(icmpinfo->type,
2170 icmpinfo->code[0],
2171 icmpinfo->code[1],
2172 ic->icmp6_type, ic->icmp6_code,
2173 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2174 }
2175
2176 /* Called when user tries to insert an entry of this type. */
2177 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2178 {
2179 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2180
2181 /* Must specify no unknown invflags */
2182 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2183 }
2184
2185 /* The built-in targets: standard (NULL) and error. */
2186 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2187 {
2188 .name = XT_STANDARD_TARGET,
2189 .targetsize = sizeof(int),
2190 .family = NFPROTO_IPV6,
2191 #ifdef CONFIG_COMPAT
2192 .compatsize = sizeof(compat_int_t),
2193 .compat_from_user = compat_standard_from_user,
2194 .compat_to_user = compat_standard_to_user,
2195 #endif
2196 },
2197 {
2198 .name = XT_ERROR_TARGET,
2199 .target = ip6t_error,
2200 .targetsize = XT_FUNCTION_MAXNAMELEN,
2201 .family = NFPROTO_IPV6,
2202 },
2203 };
2204
2205 static struct nf_sockopt_ops ip6t_sockopts = {
2206 .pf = PF_INET6,
2207 .set_optmin = IP6T_BASE_CTL,
2208 .set_optmax = IP6T_SO_SET_MAX+1,
2209 .set = do_ip6t_set_ctl,
2210 #ifdef CONFIG_COMPAT
2211 .compat_set = compat_do_ip6t_set_ctl,
2212 #endif
2213 .get_optmin = IP6T_BASE_CTL,
2214 .get_optmax = IP6T_SO_GET_MAX+1,
2215 .get = do_ip6t_get_ctl,
2216 #ifdef CONFIG_COMPAT
2217 .compat_get = compat_do_ip6t_get_ctl,
2218 #endif
2219 .owner = THIS_MODULE,
2220 };
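/* The sockopt table above is the interface ip6tables(8) uses: requests
 * arrive via get/setsockopt() on an IPv6 socket at the IPPROTO_IPV6
 * level with optnames in the IP6T_BASE_CTL range, and need
 * CAP_NET_ADMIN.  A minimal userspace-side sketch follows; it mirrors
 * what the ip6tables library is assumed to do and is illustrative only,
 * never compiled as part of this file.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int dump_filter_info(void)
{
	struct ip6t_getinfo info;
	socklen_t len = sizeof(info);
	/* raw socket just to reach the netfilter sockopt handlers */
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);

	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");
	/* dispatched to get_info() through do_ip6t_get_ctl() */
	if (fd < 0 || getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO,
				 &info, &len) != 0)
		return -1;
	printf("%s: %u entries, %u bytes, hooks %#x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	return 0;
}
#endif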
2221
2222 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2223 {
2224 .name = "icmp6",
2225 .match = icmp6_match,
2226 .matchsize = sizeof(struct ip6t_icmp),
2227 .checkentry = icmp6_checkentry,
2228 .proto = IPPROTO_ICMPV6,
2229 .family = NFPROTO_IPV6,
2230 },
2231 };
2232
2233 static int __net_init ip6_tables_net_init(struct net *net)
2234 {
2235 return xt_proto_init(net, NFPROTO_IPV6);
2236 }
2237
2238 static void __net_exit ip6_tables_net_exit(struct net *net)
2239 {
2240 xt_proto_fini(net, NFPROTO_IPV6);
2241 }
2242
2243 static struct pernet_operations ip6_tables_net_ops = {
2244 .init = ip6_tables_net_init,
2245 .exit = ip6_tables_net_exit,
2246 };
2247
2248 static int __init ip6_tables_init(void)
2249 {
2250 int ret;
2251
2252 ret = register_pernet_subsys(&ip6_tables_net_ops);
2253 if (ret < 0)
2254 goto err1;
2255
2256 /* No one else will be downing sem now, so we won't sleep */
2257 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2258 if (ret < 0)
2259 goto err2;
2260 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2261 if (ret < 0)
2262 goto err4;
2263
2264 /* Register setsockopt */
2265 ret = nf_register_sockopt(&ip6t_sockopts);
2266 if (ret < 0)
2267 goto err5;
2268
2269 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2270 return 0;
2271
2272 err5:
2273 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2274 err4:
2275 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2276 err2:
2277 unregister_pernet_subsys(&ip6_tables_net_ops);
2278 err1:
2279 return ret;
2280 }
2281
2282 static void __exit ip6_tables_fini(void)
2283 {
2284 nf_unregister_sockopt(&ip6t_sockopts);
2285
2286 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2287 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2288 unregister_pernet_subsys(&ip6_tables_net_ops);
2289 }
2290
2291 EXPORT_SYMBOL(ip6t_register_table);
2292 EXPORT_SYMBOL(ip6t_unregister_table);
2293 EXPORT_SYMBOL(ip6t_do_table);
2294
2295 module_init(ip6_tables_init);
2296 module_exit(ip6_tables_fini);