]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/ipv6/netfilter/ip6_tables.c
Merge tag 'iio-fixes-for-4.9a' of git://git.kernel.org/pub/scm/linux/kernel/git/jic23...
[mirror_ubuntu-artful-kernel.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 #ifdef CONFIG_NETFILTER_DEBUG
43 #define IP_NF_ASSERT(x) WARN_ON(!(x))
44 #else
45 #define IP_NF_ASSERT(x)
46 #endif
47
/* Allocate and fill in the boot-time replace blob for an ip6_tables
 * table.  The bare 'ip6t'/'IP6T' arguments are token-pasted by the
 * xt_alloc_initial_table() macro (see ../../netfilter/xt_repldata.h)
 * into the ip6t_replace/IP6T_* type family.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
53
54 /*
55 We keep a set of rules for each CPU, so we can avoid write-locking
56 them in the softirq when updating the counters and therefore
57 only need to read-lock in the softirq; doing a write_lock_bh() in user
58 context stops packets coming through and allows user context to read
59 the counters or update the rules.
60
61 Hence the start of any table is given by get_table() below. */
62
63 /* Returns whether matches rule or not. */
64 /* Performance critical - called for every packet */
/* Returns whether the packet matches the rule's IPv6 selectors
 * (addresses, interfaces, protocol).  Performance critical - called
 * for every packet.
 *
 * On success *protoff/*fragoff are updated for later matches; on a
 * header-walk failure for a first fragment, *hotdrop is set so the
 * caller drops the packet outright.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

	/* Masked source/destination address compare, honouring the
	 * IP6T_INV_* inversion flags via NF_INVF(). */
	if (NF_INVF(ip6info, IP6T_INV_SRCIP,
		    ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
					 &ip6info->src)) ||
	    NF_INVF(ip6info, IP6T_INV_DSTIP,
		    ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					 &ip6info->dst)))
		return false;

	/* Incoming interface name match (ret != 0 means mismatch). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0))
		return false;

	/* Outgoing interface name match. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0))
		return false;

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed: hot-drop unless this is a
			 * non-first fragment (_frag_off != 0), which simply
			 * cannot match a protocol-specific rule. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)
				return false;

			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
123
124 /* should be ip6 safe */
125 static bool
126 ip6_checkentry(const struct ip6t_ip6 *ipv6)
127 {
128 if (ipv6->flags & ~IP6T_F_MASK)
129 return false;
130 if (ipv6->invflags & ~IP6T_INV_MASK)
131 return false;
132
133 return true;
134 }
135
136 static unsigned int
137 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
138 {
139 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
140
141 return NF_DROP;
142 }
143
/* Return the rule entry located @offset bytes into the blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *p = base;

	return (struct ip6t_entry *)(p + offset);
}
149
150 /* All zeroes == unconditional rule. */
151 /* Mildly perf critical (only if packet tracing is on) */
152 static inline bool unconditional(const struct ip6t_entry *e)
153 {
154 static const struct ip6t_ip6 uncond;
155
156 return e->target_offset == sizeof(struct ip6t_entry) &&
157 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
158 }
159
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *entry = (struct ip6t_entry *)e;

	return ip6t_get_target(entry);
}
165
166 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
167 /* This cries for unification! */
/* This cries for unification! */
/* Printable chain names for base-chain hooks, indexed by NF_INET_* hook. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Kinds of trace comment emitted in the TRACE log line. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

/* Text for each nf_ip_trace_comments value. */
static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters used for every TRACE line. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_DEFAULT_MASK,
		},
	},
};
197
198 /* Mildly perf critical (only if packet tracing is on) */
/* Mildly perf critical (only if packet tracing is on) */
/* Walk helper: called for each rule @s between the chain head and the
 * matched rule @e.  Tracks the current chain name (from ERROR targets
 * that head user chains) and the 1-based rule number.  Returns 1 once
 * @s == @e, at which point *chainname/*comment/*rulenum describe the
 * matched rule for the TRACE log line.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
228
/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet (skb->nf_trace set) that matched rule @e.  Re-walks the hook's
 * rules from the chain head to recover the chain name and rule number.
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* Start from the first rule of this hook's base chain. */
	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
257 #endif
258
259 static inline struct ip6t_entry *
260 ip6t_next_entry(const struct ip6t_entry *entry)
261 {
262 return (void *)entry + entry->next_offset;
263 }
264
265 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-processing loop: walk the table's rules for this hook,
 * run matches and targets, and return the final verdict.  Counters are
 * updated under the xt_write_recseq sequence lock with BHs disabled;
 * chain jumps use the per-CPU jumpstack.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		/* Base IPv6 selector check first; cheap fail path. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extended matches must agree before the rule hits. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (encoded as -v-1). */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--stackidx]);
				continue;
			}
			/* A jump: push return address unless this is a goto
			 * or a no-op jump to the very next rule. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
}
404
405 /* Figures out from what hook each rule can be called: returns 0 if
406 there are loops. Puts hook bitmask in comefrom. */
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/* Depth-first walk of every base chain without recursion: back
 * pointers are temporarily stashed in counters.pcnt (restored to 0 on
 * the way out) and the high comefrom bit (1 << NF_INET_NUMHOOKS)
 * serves as the "currently on the walk stack" marker for loop
 * detection.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Re-entering an entry still on the walk stack:
			 * that's a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts outside the legal range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1)
					return 0;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
					e = (struct ip6t_entry *)
						(entry0 + newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}
500
501 static void cleanup_match(struct xt_entry_match *m, struct net *net)
502 {
503 struct xt_mtdtor_param par;
504
505 par.net = net;
506 par.match = m->u.kernel.match;
507 par.matchinfo = m->data;
508 par.family = NFPROTO_IPV6;
509 if (par.match->destroy != NULL)
510 par.match->destroy(&par);
511 module_put(par.match->me);
512 }
513
514 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
515 {
516 const struct ip6t_ip6 *ipv6 = par->entryinfo;
517
518 par->match = m->u.kernel.match;
519 par->matchinfo = m->data;
520
521 return xt_check_match(par, m->u.match_size - sizeof(*m),
522 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
523 }
524
525 static int
526 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
527 {
528 struct xt_match *match;
529 int ret;
530
531 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
532 m->u.user.revision);
533 if (IS_ERR(match))
534 return PTR_ERR(match);
535
536 m->u.kernel.match = match;
537
538 ret = check_match(m, par);
539 if (ret)
540 goto err;
541
542 return 0;
543 err:
544 module_put(m->u.kernel.match->me);
545 return ret;
546 }
547
548 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
549 {
550 struct xt_entry_target *t = ip6t_get_target(e);
551 struct xt_tgchk_param par = {
552 .net = net,
553 .table = name,
554 .entryinfo = e,
555 .target = t->u.kernel.target,
556 .targinfo = t->data,
557 .hook_mask = e->comefrom,
558 .family = NFPROTO_IPV6,
559 };
560
561 t = ip6t_get_target(e);
562 return xt_check_target(&par, t->u.target_size - sizeof(*t),
563 e->ipv6.proto,
564 e->ipv6.invflags & IP6T_INV_PROTO);
565 }
566
/* Fully validate one rule: allocate its per-CPU counter, look up and
 * check every match, then look up and check the target.  On any
 * failure, everything acquired so far is torn down in reverse order
 * (j counts the matches that were successfully set up).
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;
	unsigned long pcnt;

	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	e->counters.pcnt = pcnt;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the matches that passed find_check_match(). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
623
624 static bool check_underflow(const struct ip6t_entry *e)
625 {
626 const struct xt_entry_target *t;
627 unsigned int verdict;
628
629 if (!unconditional(e))
630 return false;
631 t = ip6t_get_target_c(e);
632 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
633 return false;
634 verdict = ((struct xt_standard_target *)t)->verdict;
635 verdict = -verdict - 1;
636 return verdict == NF_DROP || verdict == NF_ACCEPT;
637 }
638
/* Structural validation of one rule in the user-supplied blob:
 * alignment, bounds against [base, limit), minimum size, selector
 * sanity and internal offsets.  Also records hook entry/underflow
 * offsets into @newinfo when this rule sits at one of them.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Misaligned, header past the blob, or body past the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	/* Must at least hold the entry header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Validates target_offset/next_offset against the match chain. */
	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
687
688 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
689 {
690 struct xt_tgdtor_param par;
691 struct xt_entry_target *t;
692 struct xt_entry_match *ematch;
693
694 /* Cleanup all matches */
695 xt_ematch_foreach(ematch, e)
696 cleanup_match(ematch, net);
697 t = ip6t_get_target(e);
698
699 par.net = net;
700 par.target = t->u.kernel.target;
701 par.targinfo = t->data;
702 par.family = NFPROTO_IPV6;
703 if (par.target->destroy != NULL)
704 par.target->destroy(&par);
705 module_put(par.target->me);
706
707 xt_percpu_counter_free(e->counters.pcnt);
708 }
709
710 /* Checks and translates the user-supplied table segment (held in
711 newinfo) */
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Phases: (1) structural check of every entry while collecting entry
 * offsets and counting user chains (stacksize); (2) verify all valid
 * hooks got an entry point and an underflow; (3) loop detection via
 * mark_source_chains(); (4) full per-entry match/target validation,
 * with rollback of the already-validated prefix on failure.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		/* Each user-defined chain starts with an ERROR target. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
			goto out_free;
		if (newinfo->underflow[i] == 0xFFFFFFFF)
			goto out_free;
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back only the i entries that were fully checked. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
out_free:
	kvfree(offsets);
	return ret;
}
795
/* Sum the per-CPU packet/byte counters of every rule into the flat
 * @counters array (one slot per rule).  Each per-CPU pair is read
 * under that CPU's xt_recseq seqcount so a concurrent writer in the
 * packet path cannot leave us with a torn bcnt/pcnt pair.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				/* Retry until a consistent snapshot. */
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
825
826 static struct xt_counters *alloc_counters(const struct xt_table *table)
827 {
828 unsigned int countersize;
829 struct xt_counters *counters;
830 const struct xt_table_info *private = table->private;
831
832 /* We need atomic snapshot of counters: rest doesn't change
833 (other than comefrom, which userspace doesn't care
834 about). */
835 countersize = sizeof(struct xt_counters) * private->number;
836 counters = vzalloc(countersize);
837
838 if (counters == NULL)
839 return ERR_PTR(-ENOMEM);
840
841 get_counters(private, counters);
842
843 return counters;
844 }
845
/* Copy the table's rule blob to userspace, then patch it up in place:
 * overwrite each entry's counters with the snapshot values, and
 * rewrite every match/target name field with the kernel's canonical
 * module name (the blob stores kernel pointers there internally).
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	/* Raw copy first; fixups follow below. */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Fix the user-visible name of every match. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Fix the user-visible name of the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
915
916 #ifdef CONFIG_COMPAT
917 static void compat_standard_from_user(void *dst, const void *src)
918 {
919 int v = *(compat_int_t *)src;
920
921 if (v > 0)
922 v += xt_compat_calc_jump(AF_INET6, v);
923 memcpy(dst, &v, sizeof(v));
924 }
925
926 static int compat_standard_to_user(void __user *dst, const void *src)
927 {
928 compat_int_t cv = *(int *)src;
929
930 if (cv > 0)
931 cv -= xt_compat_calc_jump(AF_INET6, cv);
932 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
933 }
934
/* Compute how much smaller entry @e becomes in the 32-bit (compat)
 * layout (header shrink plus per-match/target deltas), record the
 * per-entry offset with xt_compat_add_offset(), and shrink
 * newinfo->size and any hook/underflow offsets lying past @e.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Hooks/underflows located after this entry move down by @off. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
965
/* Build a compat view of @info in @newinfo: copy the metadata (not the
 * entries), then run compat_calc_entry() over each rule to shrink the
 * size and offsets to the 32-bit layout.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
989
/* IP6T_SO_GET_INFO handler: copy table metadata (hooks, underflows,
 * entry count, blob size) to userspace.  When @compat is set, sizes
 * and offsets are first translated to the 32-bit layout under the
 * compat lock.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Defensively terminate the user-supplied table name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	/* Autoload the ip6table_<name> module if needed. */
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1047
/* IP6T_SO_GET_ENTRIES handler: copy the named table's rules (with
 * counters and canonical names fixed up) into uptr->entrytable.  The
 * caller-provided size must match the live table's size exactly,
 * otherwise -EAGAIN tells userspace to redo GET_INFO.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size)
		return -EINVAL;

	/* Defensively terminate the user-supplied table name. */
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1081
/* Swap @newinfo in as the named table's active rule set.  Snapshots
 * the old table's counters for userspace, releases all old-rule
 * resources, and rebalances the table module's refcount against the
 * number of non-initial rules.  After xt_replace_table() succeeds the
 * new table is live, so later copy failures are only logged.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1150
/* IP6T_SO_SET_REPLACE handler: copy the replacement header and rule
 * blob from userspace, translate/validate it, then swap it in via
 * __do_replace().  On a post-translation failure the already-checked
 * entries must be cleaned up before the blob is freed.
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Defensively terminate the user-supplied table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* Table was translated but never installed: undo every entry. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1199
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: add a user-supplied vector of
 * byte/packet counters to the live table's per-rule counters.
 * @compat selects the 32-bit-on-64-bit blob layout when parsing.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Copies the user blob into a kernel array (vfree'd below). */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* BH off: don't race the packet path's counter updates on this CPU */
	local_bh_disable();
	private = t->private;
	/* The vector must supply exactly one counter pair per rule. */
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Open a write section of the per-CPU seqcount so readers retry. */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	/* Drop the reference xt_find_table_lock() took on the table owner. */
	module_put(t->me);
free:
	vfree(paddc);

	return ret;
}
1248
1249 #ifdef CONFIG_COMPAT
/* 32-bit userland layout of struct ip6t_replace (IP6T_SO_SET_REPLACE).
 * Field order and sizes are ABI; do not reorder or retype.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;	/* byte length of entries[] blob */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1261
/*
 * Copy one native entry to userspace in the compat layout, appending the
 * rule's counter snapshot and rewriting target_offset/next_offset to
 * their compat values. *dstptr and *size track the position in and the
 * remaining bytes of the user buffer.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Fixed header copied verbatim; counters come from the snapshot. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* origsize - *size is how much the entry has shrunk so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the compat offsets into the header copied above. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1300
1301 static int
1302 compat_find_calc_match(struct xt_entry_match *m,
1303 const struct ip6t_ip6 *ipv6,
1304 int *size)
1305 {
1306 struct xt_match *match;
1307
1308 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1309 m->u.user.revision);
1310 if (IS_ERR(match))
1311 return PTR_ERR(match);
1312
1313 m->u.kernel.match = match;
1314 *size += xt_compat_match_offset(match);
1315 return 0;
1316 }
1317
1318 static void compat_release_entry(struct compat_ip6t_entry *e)
1319 {
1320 struct xt_entry_target *t;
1321 struct xt_entry_match *ematch;
1322
1323 /* Cleanup all matches */
1324 xt_ematch_foreach(ematch, e)
1325 module_put(ematch->u.kernel.match->me);
1326 t = compat_ip6t_get_target(e);
1327 module_put(t->u.kernel.target->me);
1328 }
1329
/*
 * First-pass validation of one compat entry: bounds-check it against
 * the blob, verify its internal layout, resolve (and pin) every match
 * and the target, and record how much larger the native-layout entry
 * will be. On error all module references taken here are dropped again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	/* Entry must be aligned and lie entirely inside [base, limit). */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	/* Must at least hold the fixed header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Generic target_offset/next_offset sanity checks. */
	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	/* off accumulates this entry's native-vs-compat size delta. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	/* j counts matches whose module reference must be released on error */
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember the per-entry delta for later offset translation. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Release only the first j matches — the ones actually pinned. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1398
/*
 * Second pass: expand one already-checked compat entry into the native
 * layout at *dstptr, shifting the entry-internal offsets and the
 * table's hook_entry/underflow marks by the amount the entry grew.
 * Returns void: check_compat_entry_size_and_hooks() validated it all.
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* (origsize - *size) is negative growth: offsets move forward. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Adjust hook/underflow marks that lie beyond this entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
1433
/*
 * Convert a 32-bit (compat) ruleset blob into the native layout, then
 * run the normal translate_table() validation on the converted copy.
 * On success *pinfo/*pentry0 point at the native table and the compat
 * one is freed; on failure every match/target module reference taken
 * during checking is dropped again.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	/* j counts entries whose extensions hold module references */
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	/* The blob must contain exactly the advertised entry count. */
	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	/* Expand every compat entry into the native copy, fixing up the
	 * hook/underflow offsets as entries grow.
	 */
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	/* Validate the native copy exactly like a non-compat replace. */
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Release the references pinned by the first j checked entries. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
1523
1524 static int
1525 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1526 {
1527 int ret;
1528 struct compat_ip6t_replace tmp;
1529 struct xt_table_info *newinfo;
1530 void *loc_cpu_entry;
1531 struct ip6t_entry *iter;
1532
1533 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1534 return -EFAULT;
1535
1536 /* overflow check */
1537 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1538 return -ENOMEM;
1539 if (tmp.num_counters == 0)
1540 return -EINVAL;
1541
1542 tmp.name[sizeof(tmp.name)-1] = 0;
1543
1544 newinfo = xt_alloc_table_info(tmp.size);
1545 if (!newinfo)
1546 return -ENOMEM;
1547
1548 loc_cpu_entry = newinfo->entries;
1549 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1550 tmp.size) != 0) {
1551 ret = -EFAULT;
1552 goto free_newinfo;
1553 }
1554
1555 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1556 if (ret != 0)
1557 goto free_newinfo;
1558
1559 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1560 tmp.num_counters, compat_ptr(tmp.counters));
1561 if (ret)
1562 goto free_newinfo_untrans;
1563 return 0;
1564
1565 free_newinfo_untrans:
1566 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1567 cleanup_entry(iter, net);
1568 free_newinfo:
1569 xt_free_table_info(newinfo);
1570 return ret;
1571 }
1572
1573 static int
1574 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1575 unsigned int len)
1576 {
1577 int ret;
1578
1579 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1580 return -EPERM;
1581
1582 switch (cmd) {
1583 case IP6T_SO_SET_REPLACE:
1584 ret = compat_do_replace(sock_net(sk), user, len);
1585 break;
1586
1587 case IP6T_SO_SET_ADD_COUNTERS:
1588 ret = do_add_counters(sock_net(sk), user, len, 1);
1589 break;
1590
1591 default:
1592 ret = -EINVAL;
1593 }
1594
1595 return ret;
1596 }
1597
/* Compat layout of the IP6T_SO_GET_ENTRIES request/reply header. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];	/* which table to dump */
	compat_uint_t size;		/* caller's expected blob size */
	struct compat_ip6t_entry entrytable[0];	/* filled by the kernel */
};
1603
/*
 * Dump all entries of @table to @userptr in compat layout, together
 * with a freshly taken snapshot of the per-rule counters.
 * Runs under xt_compat_lock(AF_INET6) — see compat_get_entries().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	/* Snapshot counters first so the dump is self-consistent. */
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1632
/*
 * IP6T_SO_GET_ENTRIES (compat): validate the request header and dump
 * the named table in compat layout. get.size must match the table's
 * compat size exactly; -EAGAIN tells userspace to re-read GET_INFO
 * and retry (the table changed underneath it).
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	/* Total optlen must be header plus exactly get.size entry bytes. */
	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
		return -EINVAL;

	/* Don't trust userspace to NUL-terminate the table name. */
	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			/* size mismatch: ruleset changed since GET_INFO */
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1673
1674 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1675
1676 static int
1677 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1678 {
1679 int ret;
1680
1681 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1682 return -EPERM;
1683
1684 switch (cmd) {
1685 case IP6T_SO_GET_INFO:
1686 ret = get_info(sock_net(sk), user, len, 1);
1687 break;
1688 case IP6T_SO_GET_ENTRIES:
1689 ret = compat_get_entries(sock_net(sk), user, len);
1690 break;
1691 default:
1692 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1693 }
1694 return ret;
1695 }
1696 #endif
1697
1698 static int
1699 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1700 {
1701 int ret;
1702
1703 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1704 return -EPERM;
1705
1706 switch (cmd) {
1707 case IP6T_SO_SET_REPLACE:
1708 ret = do_replace(sock_net(sk), user, len);
1709 break;
1710
1711 case IP6T_SO_SET_ADD_COUNTERS:
1712 ret = do_add_counters(sock_net(sk), user, len, 0);
1713 break;
1714
1715 default:
1716 ret = -EINVAL;
1717 }
1718
1719 return ret;
1720 }
1721
/* getsockopt() dispatcher for the native (non-compat) ABI. */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Don't trust userspace to NUL-terminate the name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Autoload "ip6t_<name>" if the extension is absent;
		 * xt_find_revision() fills in ret either way.
		 */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
1772
/*
 * Common teardown: pull @table out of the xt core, clean up every rule
 * (dropping match/target module references) and free the table info.
 * Callers unregister the netfilter hooks first — see
 * ip6t_unregister_table() and the error path of ip6t_register_table().
 */
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* Rules beyond the built-in set held a ref on the table module. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
1790
/*
 * Register an ip6tables table for @net: build the runtime ruleset from
 * @repl, register it with the xt core, publish it through *res, then
 * attach the netfilter hooks in @ops.
 * Returns 0 on success; on failure nothing is left registered and
 * *res is NULL.
 */
int ip6t_register_table(struct net *net, const struct xt_table *table,
			const struct ip6t_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate the initial ruleset just like a userspace replace. */
	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	/* One hook registration per bit set in valid_hooks. */
	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ip6t_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
1834
/*
 * Public teardown: detach the netfilter hooks first, then unregister
 * and free the table itself (order matters — packets must stop flowing
 * through the table before it is destroyed).
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ip6t_unregister_table(net, table);
}
1841
/* True when the packet's (type, code) matches the configured exact type
 * and inclusive [min_code, max_code] range, XOR-ed with @invert.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = (type == test_type) &&
		   (code >= min_code) &&
		   (code <= max_code);

	return hit != invert;
}
1851
/* Match callback for the built-in "icmp6" match extension. */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	/* code[0]/code[1] delimit the accepted ICMPv6 code range. */
	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
1878
1879 /* Called when user tries to insert an entry of this type. */
1880 static int icmp6_checkentry(const struct xt_mtchk_param *par)
1881 {
1882 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1883
1884 /* Must specify no unknown invflags */
1885 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
1886 }
1887
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* No .target callback: the payload is just an int verdict. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		/* Verdict values need translation for 32-bit userland. */
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Payload carries the error/chain name string. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
1907
/* setsockopt/getsockopt registration for PF_INET6 (the ip6tables ABI). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
1924
/* The single built-in match: "icmp6" (ICMPv6 type/code range). */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
1935
1936 static int __net_init ip6_tables_net_init(struct net *net)
1937 {
1938 return xt_proto_init(net, NFPROTO_IPV6);
1939 }
1940
/* Per-netns teardown: release the x_tables core state for @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
1945
/* Per-network-namespace setup/teardown callbacks. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
1950
1951 static int __init ip6_tables_init(void)
1952 {
1953 int ret;
1954
1955 ret = register_pernet_subsys(&ip6_tables_net_ops);
1956 if (ret < 0)
1957 goto err1;
1958
1959 /* No one else will be downing sem now, so we won't sleep */
1960 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1961 if (ret < 0)
1962 goto err2;
1963 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1964 if (ret < 0)
1965 goto err4;
1966
1967 /* Register setsockopt */
1968 ret = nf_register_sockopt(&ip6t_sockopts);
1969 if (ret < 0)
1970 goto err5;
1971
1972 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1973 return 0;
1974
1975 err5:
1976 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1977 err4:
1978 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1979 err2:
1980 unregister_pernet_subsys(&ip6_tables_net_ops);
1981 err1:
1982 return ret;
1983 }
1984
/* Module unload: tear everything down in reverse registration order. */
static void __exit ip6_tables_fini(void)
{
	/* Stop accepting new sockopt requests first. */
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
1993
/* Symbols exported for use by other kernel modules. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);