]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/ipv6/netfilter/ip6_tables.c
1e15c54fd5e27dbafaf4d4b0b3de9008a724559f
[mirror_ubuntu-artful-kernel.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <linux/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 #ifdef CONFIG_NETFILTER_DEBUG
43 #define IP_NF_ASSERT(x) WARN_ON(!(x))
44 #else
45 #define IP_NF_ASSERT(x)
46 #endif
47
/* Allocate and populate the built-in template table (one chain per valid
 * hook with a default policy plus the trailing ERROR rule); the actual
 * layout is generated by the xt_repldata.h helper macro. */
48 void *ip6t_alloc_initial_table(const struct xt_table *info)
49 {
50 return xt_alloc_initial_table(ip6t, IP6T);
51 }
52 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
53
54 /*
55 We keep a set of rules for each CPU, so we can avoid write-locking
56 them in the softirq when updating the counters and therefore
57 only need to read-lock in the softirq; doing a write_lock_bh() in user
58 context stops packets coming through and allows user context to read
59 the counters or update the rules.
60
61 Hence the start of any table is given by get_table() below. */
62
63 /* Returns whether matches rule or not. */
64 /* Performance critical - called for every packet */
/* Match the IPv6-header part of a rule against a packet: masked
 * source/destination address, input/output interface name, and
 * (optionally) the upper-layer protocol located by walking the
 * extension-header chain.  On a protocol match, *protoff and *fragoff
 * are updated; *hotdrop is set when the header chain is unparseable on
 * a non-fragment packet.  Each test honours its IP6T_INV_* inversion
 * bit via NF_INVF(). */
65 static inline bool
66 ip6_packet_match(const struct sk_buff *skb,
67 const char *indev,
68 const char *outdev,
69 const struct ip6t_ip6 *ip6info,
70 unsigned int *protoff,
71 int *fragoff, bool *hotdrop)
72 {
73 unsigned long ret;
74 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
75
76 if (NF_INVF(ip6info, IP6T_INV_SRCIP,
77 ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
78 &ip6info->src)) ||
79 NF_INVF(ip6info, IP6T_INV_DSTIP,
80 ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
81 &ip6info->dst)))
82 return false;
83
84 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
85
86 if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0))
87 return false;
88
89 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
90
91 if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0))
92 return false;
93
94 /* ... might want to do something with class and flowlabel here ... */
95
96 /* look for the desired protocol header */
97 if (ip6info->flags & IP6T_F_PROTO) {
98 int protohdr;
99 unsigned short _frag_off;
100
101 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
102 if (protohdr < 0) {
/* Parse failure on a non-fragment means a malformed header
 * chain: drop the packet rather than let it fall through. */
103 if (_frag_off == 0)
104 *hotdrop = true;
105 return false;
106 }
107 *fragoff = _frag_off;
108
109 if (ip6info->proto == protohdr) {
110 if (ip6info->invflags & IP6T_INV_PROTO)
111 return false;
112
113 return true;
114 }
115
116 /* We need match for the '-p all', too! */
117 if ((ip6info->proto != 0) &&
118 !(ip6info->invflags & IP6T_INV_PROTO))
119 return false;
120 }
121 return true;
122 }
123
124 /* should be ip6 safe */
125 static bool
126 ip6_checkentry(const struct ip6t_ip6 *ipv6)
127 {
128 if (ipv6->flags & ~IP6T_F_MASK)
129 return false;
130 if (ipv6->invflags & ~IP6T_INV_MASK)
131 return false;
132
133 return true;
134 }
135
136 static unsigned int
137 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
138 {
139 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
140
141 return NF_DROP;
142 }
143
/* Convert a byte offset within a rule blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *blob, unsigned int off)
{
	return (struct ip6t_entry *)(blob + off);
}
149
150 /* All zeroes == unconditional rule. */
151 /* Mildly perf critical (only if packet tracing is on) */
152 static inline bool unconditional(const struct ip6t_entry *e)
153 {
154 static const struct ip6t_ip6 uncond;
155
156 return e->target_offset == sizeof(struct ip6t_entry) &&
157 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
158 }
159
/* const-qualified wrapper around ip6t_get_target(); the cast only
 * strips const for the callee, nothing is modified. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
165
166 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
167 /* This cries for unification! */
/* Hook index -> chain name used in TRACE log lines. */
168 static const char *const hooknames[] = {
169 [NF_INET_PRE_ROUTING] = "PREROUTING",
170 [NF_INET_LOCAL_IN] = "INPUT",
171 [NF_INET_FORWARD] = "FORWARD",
172 [NF_INET_LOCAL_OUT] = "OUTPUT",
173 [NF_INET_POST_ROUTING] = "POSTROUTING",
174 };
175
/* Kind of rule a trace line refers to; indexes the comments[] table. */
176 enum nf_ip_trace_comments {
177 NF_IP6_TRACE_COMMENT_RULE,
178 NF_IP6_TRACE_COMMENT_RETURN,
179 NF_IP6_TRACE_COMMENT_POLICY,
180 };
181
182 static const char *const comments[] = {
183 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
184 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
185 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
186 };
187
/* Log parameters shared by every TRACE line emitted from this table. */
188 static struct nf_loginfo trace_loginfo = {
189 .type = NF_LOG_TYPE_LOG,
190 .u = {
191 .log = {
192 .level = LOGLEVEL_WARNING,
193 .logflags = NF_LOG_DEFAULT_MASK,
194 },
195 },
196 };
197
198 /* Mildly perf critical (only if packet tracing is on) */
/* Walk helper for trace_packet(): called for each rule @s in a chain
 * until the matched rule @e is found.  Tracks the current chain name
 * (ERROR targets mark user-chain heads) and the 1-based rule number.
 * Returns 1 once @s == @e, selecting the proper comment (rule /
 * return / policy) along the way. */
199 static inline int
200 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
201 const char *hookname, const char **chainname,
202 const char **comment, unsigned int *rulenum)
203 {
204 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
205
206 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
207 /* Head of user chain: ERROR target with chainname */
208 *chainname = t->target.data;
209 (*rulenum) = 0;
210 } else if (s == e) {
211 (*rulenum)++;
212
213 if (unconditional(s) &&
214 strcmp(t->target.u.kernel.target->name,
215 XT_STANDARD_TARGET) == 0 &&
216 t->verdict < 0) {
217 /* Tail of chains: STANDARD target (return/policy) */
218 *comment = *chainname == hookname
219 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
220 : comments[NF_IP6_TRACE_COMMENT_RETURN];
221 }
222 return 1;
223 } else
224 (*rulenum)++;
225
226 return 0;
227 }
228
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet that matched rule @e, scanning from the hook's entry point to
 * recover the chain name and rule number. */
229 static void trace_packet(struct net *net,
230 const struct sk_buff *skb,
231 unsigned int hook,
232 const struct net_device *in,
233 const struct net_device *out,
234 const char *tablename,
235 const struct xt_table_info *private,
236 const struct ip6t_entry *e)
237 {
238 const struct ip6t_entry *root;
239 const char *hookname, *chainname, *comment;
240 const struct ip6t_entry *iter;
241 unsigned int rulenum = 0;
242
243 root = get_entry(private->entries, private->hook_entry[hook]);
244
245 hookname = chainname = hooknames[hook];
246 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
247
248 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
249 if (get_chainname_rulenum(iter, e, hookname,
250 &chainname, &comment, &rulenum) != 0)
251 break;
252
253 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
254 "TRACE: %s:%s:%s:%u ",
255 tablename, chainname, comment, rulenum);
256 }
257 #endif
258
259 static inline struct ip6t_entry *
260 ip6t_next_entry(const struct ip6t_entry *entry)
261 {
262 return (void *)entry + entry->next_offset;
263 }
264
265 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-evaluation loop.  Walks the table's rules for the given
 * hook, using the per-cpu jumpstack to implement chain calls/returns,
 * bumping per-cpu counters under the xt_write_recseq sequence so
 * readers (get_counters) can take a consistent snapshot.  Runs with
 * BHs disabled. */
266 unsigned int
267 ip6t_do_table(struct sk_buff *skb,
268 const struct nf_hook_state *state,
269 struct xt_table *table)
270 {
271 unsigned int hook = state->hook;
272 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
273 /* Initializing verdict to NF_DROP keeps gcc happy. */
274 unsigned int verdict = NF_DROP;
275 const char *indev, *outdev;
276 const void *table_base;
277 struct ip6t_entry *e, **jumpstack;
278 unsigned int stackidx, cpu;
279 const struct xt_table_info *private;
280 struct xt_action_param acpar;
281 unsigned int addend;
282
283 /* Initialization */
284 stackidx = 0;
285 indev = state->in ? state->in->name : nulldevname;
286 outdev = state->out ? state->out->name : nulldevname;
287 /* We handle fragments by dealing with the first fragment as
288 * if it was a normal packet. All other fragments are treated
289 * normally, except that they will NEVER match rules that ask
290 * things we don't know, ie. tcp syn flag or ports). If the
291 * rule is also a fragment-specific rule, non-fragments won't
292 * match it. */
293 acpar.hotdrop = false;
294 acpar.state = state;
295
296 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
297
298 local_bh_disable();
299 addend = xt_write_recseq_begin();
300 private = table->private;
301 /*
302 * Ensure we load private-> members after we've fetched the base
303 * pointer.
304 */
305 smp_read_barrier_depends();
306 cpu = smp_processor_id();
307 table_base = private->entries;
308 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
309
310 /* Switch to alternate jumpstack if we're being invoked via TEE.
311 * TEE issues XT_CONTINUE verdict on original skb so we must not
312 * clobber the jumpstack.
313 *
314 * For recursion via REJECT or SYNPROXY the stack will be clobbered
315 * but it is no problem since absolute verdict is issued by these.
316 */
317 if (static_key_false(&xt_tee_enabled))
318 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
319
320 e = get_entry(table_base, private->hook_entry[hook]);
321
322 do {
323 const struct xt_entry_target *t;
324 const struct xt_entry_match *ematch;
325 struct xt_counters *counter;
326
327 IP_NF_ASSERT(e);
328 acpar.thoff = 0;
329 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
330 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
331 no_match:
332 e = ip6t_next_entry(e);
333 continue;
334 }
335
/* All extension matches must also accept the packet. */
336 xt_ematch_foreach(ematch, e) {
337 acpar.match = ematch->u.kernel.match;
338 acpar.matchinfo = ematch->data;
339 if (!acpar.match->match(skb, &acpar))
340 goto no_match;
341 }
342
343 counter = xt_get_this_cpu_counter(&e->counters);
344 ADD_COUNTER(*counter, skb->len, 1);
345
346 t = ip6t_get_target_c(e);
347 IP_NF_ASSERT(t->u.kernel.target);
348
349 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
350 /* The packet is traced: log it */
351 if (unlikely(skb->nf_trace))
352 trace_packet(state->net, skb, hook, state->in,
353 state->out, table->name, private, e);
354 #endif
355 /* Standard target? */
356 if (!t->u.kernel.target->target) {
357 int v;
358
359 v = ((struct xt_standard_target *)t)->verdict;
360 if (v < 0) {
361 /* Pop from stack? */
362 if (v != XT_RETURN) {
/* Absolute verdict: decode -v-1 back to NF_*. */
363 verdict = (unsigned int)(-v) - 1;
364 break;
365 }
366 if (stackidx == 0)
367 e = get_entry(table_base,
368 private->underflow[hook]);
369 else
370 e = ip6t_next_entry(jumpstack[--stackidx]);
371 continue;
372 }
/* Jump (not GOTO, not fallthrough): remember the return site. */
373 if (table_base + v != ip6t_next_entry(e) &&
374 !(e->ipv6.flags & IP6T_F_GOTO)) {
375 jumpstack[stackidx++] = e;
376 }
377
378 e = get_entry(table_base, v);
379 continue;
380 }
381
382 acpar.target = t->u.kernel.target;
383 acpar.targinfo = t->data;
384
385 verdict = t->u.kernel.target->target(skb, &acpar);
386 if (verdict == XT_CONTINUE)
387 e = ip6t_next_entry(e);
388 else
389 /* Verdict */
390 break;
391 } while (!acpar.hotdrop);
392
393 xt_write_recseq_end(addend);
394 local_bh_enable();
395
396 if (acpar.hotdrop)
397 return NF_DROP;
398 else return verdict;
399 }
400
401 /* Figures out from what hook each rule can be called: returns 0 if
402 there are loops. Puts hook bitmask in comefrom. */
/* Loop detection / reachability pass over a candidate ruleset.
 * Iterative depth-first walk from each hook entry point; the
 * counters.pcnt field is temporarily abused as a back pointer (restored
 * to 0 on unwind) and bit NF_INET_NUMHOOKS of comefrom marks "currently
 * on the walk path", so re-entering a marked rule means a cycle.
 * Returns 1 on success, 0 on loop or malformed jump. */
403 static int
404 mark_source_chains(const struct xt_table_info *newinfo,
405 unsigned int valid_hooks, void *entry0,
406 unsigned int *offsets)
407 {
408 unsigned int hook;
409
410 /* No recursion; use packet counter to save back ptrs (reset
411 to 0 as we leave), and comefrom to save source hook bitmask */
412 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
413 unsigned int pos = newinfo->hook_entry[hook];
414 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
415
416 if (!(valid_hooks & (1 << hook)))
417 continue;
418
419 /* Set initial back pointer. */
420 e->counters.pcnt = pos;
421
422 for (;;) {
423 const struct xt_standard_target *t
424 = (void *)ip6t_get_target_c(e);
425 int visited = e->comefrom & (1 << hook);
426
/* Already on the active walk path: a loop. */
427 if (e->comefrom & (1 << NF_INET_NUMHOOKS))
428 return 0;
429
430 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
431
432 /* Unconditional return/END. */
433 if ((unconditional(e) &&
434 (strcmp(t->target.u.user.name,
435 XT_STANDARD_TARGET) == 0) &&
436 t->verdict < 0) || visited) {
437 unsigned int oldpos, size;
438
439 if ((strcmp(t->target.u.user.name,
440 XT_STANDARD_TARGET) == 0) &&
441 t->verdict < -NF_MAX_VERDICT - 1)
442 return 0;
443
444 /* Return: backtrack through the last
445 big jump. */
446 do {
447 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
448 oldpos = pos;
449 pos = e->counters.pcnt;
450 e->counters.pcnt = 0;
451
452 /* We're at the start. */
453 if (pos == oldpos)
454 goto next;
455
456 e = (struct ip6t_entry *)
457 (entry0 + pos);
458 } while (oldpos == pos + e->next_offset);
459
460 /* Move along one */
461 size = e->next_offset;
462 e = (struct ip6t_entry *)
463 (entry0 + pos + size);
464 if (pos + size >= newinfo->size)
465 return 0;
466 e->counters.pcnt = pos;
467 pos += size;
468 } else {
469 int newpos = t->verdict;
470
471 if (strcmp(t->target.u.user.name,
472 XT_STANDARD_TARGET) == 0 &&
473 newpos >= 0) {
474 /* This a jump; chase it. */
/* Jump target must land exactly on a known entry offset. */
475 if (!xt_find_jump_offset(offsets, newpos,
476 newinfo->number))
477 return 0;
478 e = (struct ip6t_entry *)
479 (entry0 + newpos);
480 } else {
481 /* ... this is a fallthru */
482 newpos = pos + e->next_offset;
483 if (newpos >= newinfo->size)
484 return 0;
485 }
486 e = (struct ip6t_entry *)
487 (entry0 + newpos);
488 e->counters.pcnt = pos;
489 pos = newpos;
490 }
491 }
492 next: ;
493 }
494 return 1;
495 }
496
497 static void cleanup_match(struct xt_entry_match *m, struct net *net)
498 {
499 struct xt_mtdtor_param par;
500
501 par.net = net;
502 par.match = m->u.kernel.match;
503 par.matchinfo = m->data;
504 par.family = NFPROTO_IPV6;
505 if (par.match->destroy != NULL)
506 par.match->destroy(&par);
507 module_put(par.match->me);
508 }
509
510 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
511 {
512 const struct ip6t_ip6 *ipv6 = par->entryinfo;
513
514 par->match = m->u.kernel.match;
515 par->matchinfo = m->data;
516
517 return xt_check_match(par, m->u.match_size - sizeof(*m),
518 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
519 }
520
521 static int
522 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
523 {
524 struct xt_match *match;
525 int ret;
526
527 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
528 m->u.user.revision);
529 if (IS_ERR(match))
530 return PTR_ERR(match);
531
532 m->u.kernel.match = match;
533
534 ret = check_match(m, par);
535 if (ret)
536 goto err;
537
538 return 0;
539 err:
540 module_put(m->u.kernel.match->me);
541 return ret;
542 }
543
544 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
545 {
546 struct xt_entry_target *t = ip6t_get_target(e);
547 struct xt_tgchk_param par = {
548 .net = net,
549 .table = name,
550 .entryinfo = e,
551 .target = t->u.kernel.target,
552 .targinfo = t->data,
553 .hook_mask = e->comefrom,
554 .family = NFPROTO_IPV6,
555 };
556
557 t = ip6t_get_target(e);
558 return xt_check_target(&par, t->u.target_size - sizeof(*t),
559 e->ipv6.proto,
560 e->ipv6.invflags & IP6T_INV_PROTO);
561 }
562
/* Fully check one rule at table-load time: allocate its percpu counter,
 * resolve and validate every match, then resolve and validate the
 * target.  On failure, everything acquired so far is released in
 * reverse order (j counts the matches successfully checked). */
563 static int
564 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
565 unsigned int size,
566 struct xt_percpu_counter_alloc_state *alloc_state)
567 {
568 struct xt_entry_target *t;
569 struct xt_target *target;
570 int ret;
571 unsigned int j;
572 struct xt_mtchk_param mtpar;
573 struct xt_entry_match *ematch;
574
575 if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
576 return -ENOMEM;
577
578 j = 0;
579 mtpar.net = net;
580 mtpar.table = name;
581 mtpar.entryinfo = &e->ipv6;
582 mtpar.hook_mask = e->comefrom;
583 mtpar.family = NFPROTO_IPV6;
584 xt_ematch_foreach(ematch, e) {
585 ret = find_check_match(ematch, &mtpar);
586 if (ret != 0)
587 goto cleanup_matches;
588 ++j;
589 }
590
591 t = ip6t_get_target(e);
592 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
593 t->u.user.revision);
594 if (IS_ERR(target)) {
595 ret = PTR_ERR(target);
596 goto cleanup_matches;
597 }
598 t->u.kernel.target = target;
599
600 ret = check_target(e, net, name);
601 if (ret)
602 goto err;
603 return 0;
604 err:
605 module_put(t->u.kernel.target->me);
606 cleanup_matches:
/* Unwind only the j matches that were successfully set up. */
607 xt_ematch_foreach(ematch, e) {
608 if (j-- == 0)
609 break;
610 cleanup_match(ematch, net);
611 }
612
613 xt_percpu_counter_free(&e->counters);
614
615 return ret;
616 }
617
618 static bool check_underflow(const struct ip6t_entry *e)
619 {
620 const struct xt_entry_target *t;
621 unsigned int verdict;
622
623 if (!unconditional(e))
624 return false;
625 t = ip6t_get_target_c(e);
626 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
627 return false;
628 verdict = ((struct xt_standard_target *)t)->verdict;
629 verdict = -verdict - 1;
630 return verdict == NF_DROP || verdict == NF_ACCEPT;
631 }
632
/* Structural validation of one rule inside the user-supplied blob:
 * alignment, size bounds within [base, limit), minimum entry size, and
 * generic offset checks.  Records hook entry/underflow offsets when
 * this rule sits at one, requiring underflows to be valid policies.
 * Finally zeroes counters and comefrom for the later passes. */
633 static int
634 check_entry_size_and_hooks(struct ip6t_entry *e,
635 struct xt_table_info *newinfo,
636 const unsigned char *base,
637 const unsigned char *limit,
638 const unsigned int *hook_entries,
639 const unsigned int *underflows,
640 unsigned int valid_hooks)
641 {
642 unsigned int h;
643 int err;
644
645 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
646 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
647 (unsigned char *)e + e->next_offset > limit)
648 return -EINVAL;
649
650 if (e->next_offset
651 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
652 return -EINVAL;
653
654 if (!ip6_checkentry(&e->ipv6))
655 return -EINVAL;
656
657 err = xt_check_entry_offsets(e, e->elems, e->target_offset,
658 e->next_offset);
659 if (err)
660 return err;
661
662 /* Check hooks & underflows */
663 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
664 if (!(valid_hooks & (1 << h)))
665 continue;
666 if ((unsigned char *)e - base == hook_entries[h])
667 newinfo->hook_entry[h] = hook_entries[h];
668 if ((unsigned char *)e - base == underflows[h]) {
669 if (!check_underflow(e))
670 return -EINVAL;
671
672 newinfo->underflow[h] = underflows[h];
673 }
674 }
675
676 /* Clear counters and comefrom */
677 e->counters = ((struct xt_counters) { 0, 0 });
678 e->comefrom = 0;
679 return 0;
680 }
681
682 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
683 {
684 struct xt_tgdtor_param par;
685 struct xt_entry_target *t;
686 struct xt_entry_match *ematch;
687
688 /* Cleanup all matches */
689 xt_ematch_foreach(ematch, e)
690 cleanup_match(ematch, net);
691 t = ip6t_get_target(e);
692
693 par.net = net;
694 par.target = t->u.kernel.target;
695 par.targinfo = t->data;
696 par.family = NFPROTO_IPV6;
697 if (par.target->destroy != NULL)
698 par.target->destroy(&par);
699 module_put(par.target->me);
700 xt_percpu_counter_free(&e->counters);
701 }
702
703 /* Checks and translates the user-supplied table segment (held in
704 newinfo) */
/* Three passes over the candidate blob: (1) structural checks per rule,
 * collecting entry offsets and hook/underflow positions; (2) loop
 * detection via mark_source_chains(); (3) full match/target validation
 * via find_check_entry(), with partial cleanup on failure. */
705 static int
706 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
707 const struct ip6t_replace *repl)
708 {
709 struct xt_percpu_counter_alloc_state alloc_state = { 0 };
710 struct ip6t_entry *iter;
711 unsigned int *offsets;
712 unsigned int i;
713 int ret = 0;
714
715 newinfo->size = repl->size;
716 newinfo->number = repl->num_entries;
717
718 /* Init all hooks to impossible value. */
719 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
720 newinfo->hook_entry[i] = 0xFFFFFFFF;
721 newinfo->underflow[i] = 0xFFFFFFFF;
722 }
723
724 offsets = xt_alloc_entry_offsets(newinfo->number);
725 if (!offsets)
726 return -ENOMEM;
727 i = 0;
728 /* Walk through entries, checking offsets. */
729 xt_entry_foreach(iter, entry0, newinfo->size) {
730 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
731 entry0 + repl->size,
732 repl->hook_entry,
733 repl->underflow,
734 repl->valid_hooks);
735 if (ret != 0)
736 goto out_free;
737 if (i < repl->num_entries)
738 offsets[i] = (void *)iter - entry0;
739 ++i;
740 if (strcmp(ip6t_get_target(iter)->u.user.name,
741 XT_ERROR_TARGET) == 0)
742 ++newinfo->stacksize;
743 }
744
745 ret = -EINVAL;
746 if (i != repl->num_entries)
747 goto out_free;
748
749 /* Check hooks all assigned */
750 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
751 /* Only hooks which are valid */
752 if (!(repl->valid_hooks & (1 << i)))
753 continue;
754 if (newinfo->hook_entry[i] == 0xFFFFFFFF)
755 goto out_free;
756 if (newinfo->underflow[i] == 0xFFFFFFFF)
757 goto out_free;
758 }
759
760 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
761 ret = -ELOOP;
762 goto out_free;
763 }
764 kvfree(offsets);
765
766 /* Finally, each sanity check must pass */
767 i = 0;
768 xt_entry_foreach(iter, entry0, newinfo->size) {
769 ret = find_check_entry(iter, net, repl->name, repl->size,
770 &alloc_state);
771 if (ret != 0)
772 break;
773 ++i;
774 }
775
776 if (ret != 0) {
/* Undo only the i entries that passed find_check_entry(). */
777 xt_entry_foreach(iter, entry0, newinfo->size) {
778 if (i-- == 0)
779 break;
780 cleanup_entry(iter, net);
781 }
782 return ret;
783 }
784
785 return ret;
786 out_free:
787 kvfree(offsets);
788 return ret;
789 }
790
/* Sum the per-cpu counters of every rule into @counters.  Each per-cpu
 * read is retried under that cpu's xt_recseq seqcount so a concurrent
 * writer (ip6t_do_table / do_add_counters) never yields a torn
 * bcnt/pcnt pair. */
791 static void
792 get_counters(const struct xt_table_info *t,
793 struct xt_counters counters[])
794 {
795 struct ip6t_entry *iter;
796 unsigned int cpu;
797 unsigned int i;
798
799 for_each_possible_cpu(cpu) {
800 seqcount_t *s = &per_cpu(xt_recseq, cpu);
801
802 i = 0;
803 xt_entry_foreach(iter, t->entries, t->size) {
804 struct xt_counters *tmp;
805 u64 bcnt, pcnt;
806 unsigned int start;
807
808 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
809 do {
810 start = read_seqcount_begin(s);
811 bcnt = tmp->bcnt;
812 pcnt = tmp->pcnt;
813 } while (read_seqcount_retry(s, start));
814
815 ADD_COUNTER(counters[i], bcnt, pcnt);
816 ++i;
817 }
818 }
819 }
820
821 static struct xt_counters *alloc_counters(const struct xt_table *table)
822 {
823 unsigned int countersize;
824 struct xt_counters *counters;
825 const struct xt_table_info *private = table->private;
826
827 /* We need atomic snapshot of counters: rest doesn't change
828 (other than comefrom, which userspace doesn't care
829 about). */
830 countersize = sizeof(struct xt_counters) * private->number;
831 counters = vzalloc(countersize);
832
833 if (counters == NULL)
834 return ERR_PTR(-ENOMEM);
835
836 get_counters(private, counters);
837
838 return counters;
839 }
840
/* Copy the live ruleset back to userspace: raw entry bytes first, then
 * patch in the counter snapshot and convert each match/target header
 * to its user representation via xt_match_to_user/xt_target_to_user. */
841 static int
842 copy_entries_to_user(unsigned int total_size,
843 const struct xt_table *table,
844 void __user *userptr)
845 {
846 unsigned int off, num;
847 const struct ip6t_entry *e;
848 struct xt_counters *counters;
849 const struct xt_table_info *private = table->private;
850 int ret = 0;
851 const void *loc_cpu_entry;
852
853 counters = alloc_counters(table);
854 if (IS_ERR(counters))
855 return PTR_ERR(counters);
856
857 loc_cpu_entry = private->entries;
858
859 /* FIXME: use iterator macros --RR */
860 /* ... then go back and fix counters and names */
861 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
862 unsigned int i;
863 const struct xt_entry_match *m;
864 const struct xt_entry_target *t;
865
866 e = (struct ip6t_entry *)(loc_cpu_entry + off);
867 if (copy_to_user(userptr + off, e, sizeof(*e))) {
868 ret = -EFAULT;
869 goto free_counters;
870 }
/* Overwrite the kernel-internal counters with the snapshot. */
871 if (copy_to_user(userptr + off
872 + offsetof(struct ip6t_entry, counters),
873 &counters[num],
874 sizeof(counters[num])) != 0) {
875 ret = -EFAULT;
876 goto free_counters;
877 }
878
879 for (i = sizeof(struct ip6t_entry);
880 i < e->target_offset;
881 i += m->u.match_size) {
882 m = (void *)e + i;
883
884 if (xt_match_to_user(m, userptr + off + i)) {
885 ret = -EFAULT;
886 goto free_counters;
887 }
888 }
889
890 t = ip6t_get_target_c(e);
891 if (xt_target_to_user(t, userptr + off + e->target_offset)) {
892 ret = -EFAULT;
893 goto free_counters;
894 }
895 }
896
897 free_counters:
898 vfree(counters);
899 return ret;
900 }
901
902 #ifdef CONFIG_COMPAT
903 static void compat_standard_from_user(void *dst, const void *src)
904 {
905 int v = *(compat_int_t *)src;
906
907 if (v > 0)
908 v += xt_compat_calc_jump(AF_INET6, v);
909 memcpy(dst, &v, sizeof(v));
910 }
911
912 static int compat_standard_to_user(void __user *dst, const void *src)
913 {
914 compat_int_t cv = *(int *)src;
915
916 if (cv > 0)
917 cv -= xt_compat_calc_jump(AF_INET6, cv);
918 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
919 }
920
/* Accumulate, for one rule, how much smaller its compat representation
 * is (entry header delta plus each match/target delta); register that
 * offset with the compat layer and shrink newinfo's size and any
 * hook/underflow offsets that lie beyond this rule. */
921 static int compat_calc_entry(const struct ip6t_entry *e,
922 const struct xt_table_info *info,
923 const void *base, struct xt_table_info *newinfo)
924 {
925 const struct xt_entry_match *ematch;
926 const struct xt_entry_target *t;
927 unsigned int entry_offset;
928 int off, i, ret;
929
930 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
931 entry_offset = (void *)e - base;
932 xt_ematch_foreach(ematch, e)
933 off += xt_compat_match_offset(ematch->u.kernel.match);
934 t = ip6t_get_target_c(e);
935 off += xt_compat_target_offset(t->u.kernel.target);
936 newinfo->size -= off;
937 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
938 if (ret)
939 return ret;
940
941 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
942 if (info->hook_entry[i] &&
943 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
944 newinfo->hook_entry[i] -= off;
945 if (info->underflow[i] &&
946 (e < (struct ip6t_entry *)(base + info->underflow[i])))
947 newinfo->underflow[i] -= off;
948 }
949 return 0;
950 }
951
952 static int compat_table_info(const struct xt_table_info *info,
953 struct xt_table_info *newinfo)
954 {
955 struct ip6t_entry *iter;
956 const void *loc_cpu_entry;
957 int ret;
958
959 if (!newinfo || !info)
960 return -EINVAL;
961
962 /* we dont care about newinfo->entries */
963 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
964 newinfo->initial_entries = 0;
965 loc_cpu_entry = info->entries;
966 xt_compat_init_offsets(AF_INET6, info->number);
967 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
968 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
969 if (ret != 0)
970 return ret;
971 }
972 return 0;
973 }
974 #endif
975
/* IP6T_SO_GET_INFO handler: look up the named table (auto-loading its
 * module if needed) and copy hook offsets, entry count and blob size to
 * userspace.  In compat mode the offsets/sizes are first recomputed to
 * the compat layout under the compat lock. */
976 static int get_info(struct net *net, void __user *user,
977 const int *len, int compat)
978 {
979 char name[XT_TABLE_MAXNAMELEN];
980 struct xt_table *t;
981 int ret;
982
983 if (*len != sizeof(struct ip6t_getinfo))
984 return -EINVAL;
985
986 if (copy_from_user(name, user, sizeof(name)) != 0)
987 return -EFAULT;
988
989 name[XT_TABLE_MAXNAMELEN-1] = '\0';
990 #ifdef CONFIG_COMPAT
991 if (compat)
992 xt_compat_lock(AF_INET6);
993 #endif
994 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
995 "ip6table_%s", name);
996 if (t) {
997 struct ip6t_getinfo info;
998 const struct xt_table_info *private = t->private;
999 #ifdef CONFIG_COMPAT
1000 struct xt_table_info tmp;
1001
1002 if (compat) {
1003 ret = compat_table_info(private, &tmp);
1004 xt_compat_flush_offsets(AF_INET6);
1005 private = &tmp;
1006 }
1007 #endif
1008 memset(&info, 0, sizeof(info));
1009 info.valid_hooks = t->valid_hooks;
1010 memcpy(info.hook_entry, private->hook_entry,
1011 sizeof(info.hook_entry));
1012 memcpy(info.underflow, private->underflow,
1013 sizeof(info.underflow));
1014 info.num_entries = private->number;
1015 info.size = private->size;
/* name was NUL-terminated above, so this strcpy is bounded. */
1016 strcpy(info.name, name);
1017
1018 if (copy_to_user(user, &info, *len) != 0)
1019 ret = -EFAULT;
1020 else
1021 ret = 0;
1022
1023 xt_table_unlock(t);
1024 module_put(t->me);
1025 } else
1026 ret = -ENOENT;
1027 #ifdef CONFIG_COMPAT
1028 if (compat)
1029 xt_compat_unlock(AF_INET6);
1030 #endif
1031 return ret;
1032 }
1033
/* IP6T_SO_GET_ENTRIES handler: validate the user-supplied size against
 * the live table and copy the full ruleset (with counter snapshot) to
 * userspace.  -EAGAIN tells userspace its size is stale (table was
 * replaced between GET_INFO and GET_ENTRIES). */
1034 static int
1035 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1036 const int *len)
1037 {
1038 int ret;
1039 struct ip6t_get_entries get;
1040 struct xt_table *t;
1041
1042 if (*len < sizeof(get))
1043 return -EINVAL;
1044 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1045 return -EFAULT;
1046 if (*len != sizeof(struct ip6t_get_entries) + get.size)
1047 return -EINVAL;
1048
1049 get.name[sizeof(get.name) - 1] = '\0';
1050
1051 t = xt_find_table_lock(net, AF_INET6, get.name);
1052 if (t) {
1053 struct xt_table_info *private = t->private;
1054 if (get.size == private->size)
1055 ret = copy_entries_to_user(private->size,
1056 t, uptr->entrytable);
1057 else
1058 ret = -EAGAIN;
1059
1060 module_put(t->me);
1061 xt_table_unlock(t);
1062 } else
1063 ret = -ENOENT;
1064
1065 return ret;
1066 }
1067
/* Common tail of native and compat SET_REPLACE: swap @newinfo into the
 * named table, adjust the table module's refcount for the rule-count
 * change, snapshot the old counters for userspace, and tear down the
 * old ruleset.  The counters copy-to-user failure is deliberately
 * non-fatal since the new table is already live. */
1068 static int
1069 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1070 struct xt_table_info *newinfo, unsigned int num_counters,
1071 void __user *counters_ptr)
1072 {
1073 int ret;
1074 struct xt_table *t;
1075 struct xt_table_info *oldinfo;
1076 struct xt_counters *counters;
1077 struct ip6t_entry *iter;
1078
1079 ret = 0;
1080 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1081 if (!counters) {
1082 ret = -ENOMEM;
1083 goto out;
1084 }
1085
1086 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1087 "ip6table_%s", name);
1088 if (!t) {
1089 ret = -ENOENT;
1090 goto free_newinfo_counters_untrans;
1091 }
1092
1093 /* You lied! */
1094 if (valid_hooks != t->valid_hooks) {
1095 ret = -EINVAL;
1096 goto put_module;
1097 }
1098
1099 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1100 if (!oldinfo)
1101 goto put_module;
1102
1103 /* Update module usage count based on number of rules */
1104 if ((oldinfo->number > oldinfo->initial_entries) ||
1105 (newinfo->number <= oldinfo->initial_entries))
1106 module_put(t->me);
1107 if ((oldinfo->number > oldinfo->initial_entries) &&
1108 (newinfo->number <= oldinfo->initial_entries))
1109 module_put(t->me);
1110
1111 /* Get the old counters, and synchronize with replace */
1112 get_counters(oldinfo, counters);
1113
1114 /* Decrease module usage counts and free resource */
1115 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1116 cleanup_entry(iter, net)
1117
1118 xt_free_table_info(oldinfo);
1119 if (copy_to_user(counters_ptr, counters,
1120 sizeof(struct xt_counters) * num_counters) != 0) {
1121 /* Silent error, can't fail, new table is already in place */
1122 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1123 }
1124 vfree(counters);
1125 xt_table_unlock(t);
1126 return ret;
1127
1128 put_module:
1129 module_put(t->me);
1130 xt_table_unlock(t);
1131 free_newinfo_counters_untrans:
1132 vfree(counters);
1133 out:
1134 return ret;
1135 }
1136
/* Native SET_REPLACE path: copy the replacement header and blob from
 * userspace, validate/translate it, then install via __do_replace().
 * On install failure the already-translated entries must be cleaned up
 * before the blob is freed. */
1137 static int
1138 do_replace(struct net *net, const void __user *user, unsigned int len)
1139 {
1140 int ret;
1141 struct ip6t_replace tmp;
1142 struct xt_table_info *newinfo;
1143 void *loc_cpu_entry;
1144 struct ip6t_entry *iter;
1145
1146 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1147 return -EFAULT;
1148
1149 /* overflow check */
/* Bounds num_counters so the vzalloc multiply in __do_replace is safe. */
1150 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1151 return -ENOMEM;
1152 if (tmp.num_counters == 0)
1153 return -EINVAL;
1154
1155 tmp.name[sizeof(tmp.name)-1] = 0;
1156
1157 newinfo = xt_alloc_table_info(tmp.size);
1158 if (!newinfo)
1159 return -ENOMEM;
1160
1161 loc_cpu_entry = newinfo->entries;
1162 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1163 tmp.size) != 0) {
1164 ret = -EFAULT;
1165 goto free_newinfo;
1166 }
1167
1168 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1169 if (ret != 0)
1170 goto free_newinfo;
1171
1172 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1173 tmp.num_counters, tmp.counters);
1174 if (ret)
1175 goto free_newinfo_untrans;
1176 return 0;
1177
1178 free_newinfo_untrans:
1179 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1180 cleanup_entry(iter, net);
1181 free_newinfo:
1182 xt_free_table_info(newinfo);
1183 return ret;
1184 }
1185
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: add a user-supplied array of
 * byte/packet counters onto the live per-cpu counters of a table.
 * @compat: non-zero when called from the 32-bit compat setsockopt path,
 *          so the copy helper parses the compat layout of the header.
 * Returns 0 on success or a negative errno.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;	/* kernel copy of the user counter array */
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Validates len against tmp.num_counters before allocating/copying. */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (!t) {
		ret = -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	/* The counter array must describe exactly the live ruleset. */
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Enter the write side of the xt recseq so counter readers resync. */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1234
1235 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct ip6t_replace: identical except that
 * the counters pointer is a compat_uptr_t and the trailing entries use
 * the compat entry layout.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;		/* bitmask of hooks with entry points */
	u32 num_entries;
	u32 size;			/* total size of the entries blob */
	u32 hook_entry[NF_INET_NUMHOOKS];	/* offsets of hook entry points */
	u32 underflow[NF_INET_NUMHOOKS];	/* offsets of hook underflows */
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];	/* flexible trailing array */
};
1247
/*
 * Copy one native entry to userspace in the 32-bit compat layout.
 * @dstptr/@size: running userspace cursor and remaining size, advanced
 *                as matches and the target are shrunk to compat form.
 * @counters/@i:  snapshot counter array and this entry's index in it.
 * Returns 0 on success or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Entry header first; overwrite its counters with the snapshot. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; rebase offsets. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1286
1287 static int
1288 compat_find_calc_match(struct xt_entry_match *m,
1289 const struct ip6t_ip6 *ipv6,
1290 int *size)
1291 {
1292 struct xt_match *match;
1293
1294 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1295 m->u.user.revision);
1296 if (IS_ERR(match))
1297 return PTR_ERR(match);
1298
1299 m->u.kernel.match = match;
1300 *size += xt_compat_match_offset(match);
1301 return 0;
1302 }
1303
1304 static void compat_release_entry(struct compat_ip6t_entry *e)
1305 {
1306 struct xt_entry_target *t;
1307 struct xt_entry_match *ematch;
1308
1309 /* Cleanup all matches */
1310 xt_ematch_foreach(ematch, e)
1311 module_put(ematch->u.kernel.match->me);
1312 t = compat_ip6t_get_target(e);
1313 module_put(t->u.kernel.target->me);
1314 }
1315
/*
 * Pass one of compat table translation: validate a single compat entry's
 * alignment, bounds and internal offsets, resolve (and take references
 * on) its match/target modules, and record the compat->native size delta
 * with xt_compat_add_offset() so pass two can rebase offsets.
 * On error, all module references taken so far are dropped again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	/* Entry must be aligned and lie fully inside the blob. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	/* Must at least hold the entry header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Validate target_offset/next_offset against the match chain. */
	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	/* j counts matches with references held, for unwinding on error. */
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	/* off now holds this entry's full compat->native growth. */
	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1384
/*
 * Pass two of compat table translation: expand one compat entry into its
 * native layout at *dstptr. Cannot fail — the check pass has already
 * validated everything and pinned all modules. Also rebases any hook
 * entry/underflow offsets that lie beyond this (now larger) entry.
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* (origsize - *size) is negative growth; rebase internal offsets. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Hooks whose offsets point past this entry shift by the growth. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
1419
/*
 * Translate a full 32-bit compat replace blob into native layout.
 * Two passes under the per-family xt_compat lock: first validate every
 * compat entry and compute the native size, then allocate the native
 * table and expand the entries into it. Finally the native blob is run
 * through the regular translate_table() for full checking.
 * On success *pinfo/*pentry0 are replaced with the native table and the
 * old (compat) info is freed.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	/* j counts validated entries (module refs held) for error unwind. */
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	/* The blob must contain exactly the advertised number of entries. */
	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	/* Pass two: expand every compat entry into the native blob. */
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	/* Build a native replace header with the rebased hook offsets. */
	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Drop module refs for the j entries that passed the check. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
1509
/*
 * IP6T_SO_SET_REPLACE handler for 32-bit callers: same flow as
 * do_replace() but parses the compat replace header and translates the
 * entry blob to native layout before installing it.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Userspace may not have NUL-terminated the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* Validate, translate to native layout and pin modules. */
	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1558
1559 static int
1560 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1561 unsigned int len)
1562 {
1563 int ret;
1564
1565 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1566 return -EPERM;
1567
1568 switch (cmd) {
1569 case IP6T_SO_SET_REPLACE:
1570 ret = compat_do_replace(sock_net(sk), user, len);
1571 break;
1572
1573 case IP6T_SO_SET_ADD_COUNTERS:
1574 ret = do_add_counters(sock_net(sk), user, len, 1);
1575 break;
1576
1577 default:
1578 ret = -EINVAL;
1579 }
1580
1581 return ret;
1582 }
1583
/* 32-bit layout of the IP6T_SO_GET_ENTRIES request/reply header. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;		/* size of the compat entry blob */
	struct compat_ip6t_entry entrytable[0];	/* flexible trailing array */
};
1589
1590 static int
1591 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1592 void __user *userptr)
1593 {
1594 struct xt_counters *counters;
1595 const struct xt_table_info *private = table->private;
1596 void __user *pos;
1597 unsigned int size;
1598 int ret = 0;
1599 unsigned int i = 0;
1600 struct ip6t_entry *iter;
1601
1602 counters = alloc_counters(table);
1603 if (IS_ERR(counters))
1604 return PTR_ERR(counters);
1605
1606 pos = userptr;
1607 size = total_size;
1608 xt_entry_foreach(iter, private->entries, total_size) {
1609 ret = compat_copy_entry_to_user(iter, &pos,
1610 &size, counters, i++);
1611 if (ret != 0)
1612 break;
1613 }
1614
1615 vfree(counters);
1616 return ret;
1617 }
1618
/*
 * IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * size against the compat view of the table and copy the entries out.
 * The xt_compat lock is taken before the table lock (same order as the
 * replace path) so the compat offset bookkeeping stays consistent.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	/* Caller's buffer must exactly fit header + advertised blob size. */
	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			/* Table changed size since the caller asked. */
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1659
1660 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1661
1662 static int
1663 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1664 {
1665 int ret;
1666
1667 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1668 return -EPERM;
1669
1670 switch (cmd) {
1671 case IP6T_SO_GET_INFO:
1672 ret = get_info(sock_net(sk), user, len, 1);
1673 break;
1674 case IP6T_SO_GET_ENTRIES:
1675 ret = compat_get_entries(sock_net(sk), user, len);
1676 break;
1677 default:
1678 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1679 }
1680 return ret;
1681 }
1682 #endif
1683
1684 static int
1685 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1686 {
1687 int ret;
1688
1689 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1690 return -EPERM;
1691
1692 switch (cmd) {
1693 case IP6T_SO_SET_REPLACE:
1694 ret = do_replace(sock_net(sk), user, len);
1695 break;
1696
1697 case IP6T_SO_SET_ADD_COUNTERS:
1698 ret = do_add_counters(sock_net(sk), user, len, 0);
1699 break;
1700
1701 default:
1702 ret = -EINVAL;
1703 }
1704
1705 return ret;
1706 }
1707
/*
 * Native getsockopt dispatcher: table info, entry dump and match/target
 * revision queries. The revision query may load the module on demand via
 * try_then_request_module().
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;	/* 1 = target lookup, 0 = match lookup */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Userspace may not have NUL-terminated the name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Retries after requesting the "ip6t_<name>" module. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
1758
1759 static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
1760 {
1761 struct xt_table_info *private;
1762 void *loc_cpu_entry;
1763 struct module *table_owner = table->me;
1764 struct ip6t_entry *iter;
1765
1766 private = xt_unregister_table(table);
1767
1768 /* Decrease module usage counts and free resources */
1769 loc_cpu_entry = private->entries;
1770 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1771 cleanup_entry(iter, net);
1772 if (private->number > private->initial_entries)
1773 module_put(table_owner);
1774 xt_free_table_info(private);
1775 }
1776
/*
 * Register a new ip6tables table in @net with the initial ruleset @repl,
 * then attach its netfilter hooks @ops. On success *res points to the
 * live table; on hook-registration failure the table is torn down again
 * and *res is cleared.
 * Returns 0 on success or a negative errno.
 */
int ip6t_register_table(struct net *net, const struct xt_table *table,
			const struct ip6t_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate the initial ruleset and take module references. */
	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ip6t_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

 out_free:
	xt_free_table_info(newinfo);
	return ret;
}
1820
/*
 * Public counterpart of ip6t_register_table(): detach the netfilter
 * hooks first so no new packets enter the table, then tear it down.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ip6t_unregister_table(net, table);
}
1827
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = type == test_type &&
			code >= min_code &&
			code <= max_code;

	/* XOR flips the result when the rule carries the invert flag. */
	return in_range ^ invert;
}
1837
/*
 * Match function of the built-in "icmp6" match: compare the packet's
 * ICMPv6 type and code against the range configured in the rule.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;	/* stack copy used when header is non-linear */
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
1864
1865 /* Called when user tries to insert an entry of this type. */
1866 static int icmp6_checkentry(const struct xt_mtchk_param *par)
1867 {
1868 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1869
1870 /* Must specify no unknown invflags */
1871 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
1872 }
1873
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdicts (ACCEPT/DROP/jump): no target function,
		 * the verdict is handled inline by the traverser. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Placed by iptables-save/restore for unknown targets. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
1893
/* get/setsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the dispatchers above (with compat variants). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
1910
/* The single built-in match: "icmp6", matching ICMPv6 type/code. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
1921
/* Per-netns init: set up the xt proc entries for IPv6. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
1926
/* Per-netns teardown: remove the xt proc entries for IPv6. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
1931
/* Hook the per-netns init/exit callbacks into the pernet machinery. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
1936
1937 static int __init ip6_tables_init(void)
1938 {
1939 int ret;
1940
1941 ret = register_pernet_subsys(&ip6_tables_net_ops);
1942 if (ret < 0)
1943 goto err1;
1944
1945 /* No one else will be downing sem now, so we won't sleep */
1946 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1947 if (ret < 0)
1948 goto err2;
1949 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1950 if (ret < 0)
1951 goto err4;
1952
1953 /* Register setsockopt */
1954 ret = nf_register_sockopt(&ip6t_sockopts);
1955 if (ret < 0)
1956 goto err5;
1957
1958 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1959 return 0;
1960
1961 err5:
1962 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1963 err4:
1964 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1965 err2:
1966 unregister_pernet_subsys(&ip6_tables_net_ops);
1967 err1:
1968 return ret;
1969 }
1970
/* Module exit: undo ip6_tables_init() in strict reverse order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
1979
/* Public API used by the ip6table_* table modules (filter, nat, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);