/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
44 #define dprintf(format, args...)
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
50 #define duprintf(format, args...)
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
61 #define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
79 /* Check for an extension */
81 ip6t_ext_hdr(u8 nexthdr
)
83 return ( (nexthdr
== IPPROTO_HOPOPTS
) ||
84 (nexthdr
== IPPROTO_ROUTING
) ||
85 (nexthdr
== IPPROTO_FRAGMENT
) ||
86 (nexthdr
== IPPROTO_ESP
) ||
87 (nexthdr
== IPPROTO_AH
) ||
88 (nexthdr
== IPPROTO_NONE
) ||
89 (nexthdr
== IPPROTO_DSTOPTS
) );
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
95 ip6_packet_match(const struct sk_buff
*skb
,
98 const struct ip6t_ip6
*ip6info
,
99 unsigned int *protoff
,
100 int *fragoff
, bool *hotdrop
)
104 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
109 &ip6info
->src
), IP6T_INV_SRCIP
)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
111 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
112 dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
125 ret
|= (((const unsigned long *)indev
)[i
]
126 ^ ((const unsigned long *)ip6info
->iniface
)[i
])
127 & ((const unsigned long *)ip6info
->iniface_mask
)[i
];
130 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev
, ip6info
->iniface
,
133 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
137 for (i
= 0, ret
= 0; i
< IFNAMSIZ
/sizeof(unsigned long); i
++) {
138 ret
|= (((const unsigned long *)outdev
)[i
]
139 ^ ((const unsigned long *)ip6info
->outiface
)[i
])
140 & ((const unsigned long *)ip6info
->outiface_mask
)[i
];
143 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev
, ip6info
->outiface
,
146 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
150 /* ... might want to do something with class and flowlabel here ... */
152 /* look for the desired protocol header */
153 if((ip6info
->flags
& IP6T_F_PROTO
)) {
155 unsigned short _frag_off
;
157 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
);
163 *fragoff
= _frag_off
;
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
167 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
170 if (ip6info
->proto
== protohdr
) {
171 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
177 /* We need match for the '-p all', too! */
178 if ((ip6info
->proto
!= 0) &&
179 !(ip6info
->invflags
& IP6T_INV_PROTO
))
185 /* should be ip6 safe */
187 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
189 if (ipv6
->flags
& ~IP6T_F_MASK
) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6
->flags
& ~IP6T_F_MASK
);
194 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6
->invflags
& ~IP6T_INV_MASK
);
203 ip6t_error(struct sk_buff
*skb
,
204 const struct net_device
*in
,
205 const struct net_device
*out
,
206 unsigned int hooknum
,
207 const struct xt_target
*target
,
208 const void *targinfo
)
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo
);
216 /* Performance critical - called for every packet */
218 do_match(struct ip6t_entry_match
*m
, const struct sk_buff
*skb
,
219 struct xt_match_param
*par
)
221 par
->match
= m
->u
.kernel
.match
;
222 par
->matchinfo
= m
->data
;
224 /* Stop iteration if it doesn't match */
225 if (!m
->u
.kernel
.match
->match(skb
, par
))
/* Translate a byte offset within a table blob into an entry pointer.
 * Cast through char * so the pointer arithmetic is well defined in
 * ISO C (arithmetic on void * is a GCC extension). */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((char *)base + offset);
}
237 /* All zeroes == unconditional rule. */
238 /* Mildly perf critical (only if packet tracing is on) */
240 unconditional(const struct ip6t_ip6
*ipv6
)
244 for (i
= 0; i
< sizeof(*ipv6
); i
++)
245 if (((char *)ipv6
)[i
])
248 return (i
== sizeof(*ipv6
));
251 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
252 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
253 /* This cries for unification! */
254 static const char *const hooknames
[] = {
255 [NF_INET_PRE_ROUTING
] = "PREROUTING",
256 [NF_INET_LOCAL_IN
] = "INPUT",
257 [NF_INET_FORWARD
] = "FORWARD",
258 [NF_INET_LOCAL_OUT
] = "OUTPUT",
259 [NF_INET_POST_ROUTING
] = "POSTROUTING",
262 enum nf_ip_trace_comments
{
263 NF_IP6_TRACE_COMMENT_RULE
,
264 NF_IP6_TRACE_COMMENT_RETURN
,
265 NF_IP6_TRACE_COMMENT_POLICY
,
268 static const char *const comments
[] = {
269 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
270 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
271 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
274 static struct nf_loginfo trace_loginfo
= {
275 .type
= NF_LOG_TYPE_LOG
,
279 .logflags
= NF_LOG_MASK
,
284 /* Mildly perf critical (only if packet tracing is on) */
286 get_chainname_rulenum(struct ip6t_entry
*s
, struct ip6t_entry
*e
,
287 char *hookname
, char **chainname
,
288 char **comment
, unsigned int *rulenum
)
290 struct ip6t_standard_target
*t
= (void *)ip6t_get_target(s
);
292 if (strcmp(t
->target
.u
.kernel
.target
->name
, IP6T_ERROR_TARGET
) == 0) {
293 /* Head of user chain: ERROR target with chainname */
294 *chainname
= t
->target
.data
;
299 if (s
->target_offset
== sizeof(struct ip6t_entry
)
300 && strcmp(t
->target
.u
.kernel
.target
->name
,
301 IP6T_STANDARD_TARGET
) == 0
303 && unconditional(&s
->ipv6
)) {
304 /* Tail of chains: STANDARD target (return/policy) */
305 *comment
= *chainname
== hookname
306 ? (char *)comments
[NF_IP6_TRACE_COMMENT_POLICY
]
307 : (char *)comments
[NF_IP6_TRACE_COMMENT_RETURN
];
316 static void trace_packet(struct sk_buff
*skb
,
318 const struct net_device
*in
,
319 const struct net_device
*out
,
320 const char *tablename
,
321 struct xt_table_info
*private,
322 struct ip6t_entry
*e
)
325 const struct ip6t_entry
*root
;
326 char *hookname
, *chainname
, *comment
;
327 unsigned int rulenum
= 0;
329 table_base
= (void *)private->entries
[smp_processor_id()];
330 root
= get_entry(table_base
, private->hook_entry
[hook
]);
332 hookname
= chainname
= (char *)hooknames
[hook
];
333 comment
= (char *)comments
[NF_IP6_TRACE_COMMENT_RULE
];
335 IP6T_ENTRY_ITERATE(root
,
336 private->size
- private->hook_entry
[hook
],
337 get_chainname_rulenum
,
338 e
, hookname
, &chainname
, &comment
, &rulenum
);
340 nf_log_packet(AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
341 "TRACE: %s:%s:%s:%u ",
342 tablename
, chainname
, comment
, rulenum
);
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
348 ip6t_do_table(struct sk_buff
*skb
,
350 const struct net_device
*in
,
351 const struct net_device
*out
,
352 struct xt_table
*table
)
354 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
355 bool hotdrop
= false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict
= NF_DROP
;
358 const char *indev
, *outdev
;
360 struct ip6t_entry
*e
, *back
;
361 struct xt_table_info
*private;
362 struct xt_match_param mtpar
;
365 indev
= in
? in
->name
: nulldevname
;
366 outdev
= out
? out
->name
: nulldevname
;
367 /* We handle fragments by dealing with the first fragment as
368 * if it was a normal packet. All other fragments are treated
369 * normally, except that they will NEVER match rules that ask
370 * things we don't know, ie. tcp syn flag or ports). If the
371 * rule is also a fragment-specific rule, non-fragments won't
373 mtpar
.hotdrop
= &hotdrop
;
377 read_lock_bh(&table
->lock
);
378 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
379 private = table
->private;
380 table_base
= (void *)private->entries
[smp_processor_id()];
381 e
= get_entry(table_base
, private->hook_entry
[hook
]);
383 /* For return from builtin chain */
384 back
= get_entry(table_base
, private->underflow
[hook
]);
389 if (ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
390 &mtpar
.thoff
, &mtpar
.fragoff
, &hotdrop
)) {
391 struct ip6t_entry_target
*t
;
393 if (IP6T_MATCH_ITERATE(e
, do_match
, skb
, &mtpar
) != 0)
396 ADD_COUNTER(e
->counters
,
397 ntohs(ipv6_hdr(skb
)->payload_len
) +
398 sizeof(struct ipv6hdr
), 1);
400 t
= ip6t_get_target(e
);
401 IP_NF_ASSERT(t
->u
.kernel
.target
);
403 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
404 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
405 /* The packet is traced: log it */
406 if (unlikely(skb
->nf_trace
))
407 trace_packet(skb
, hook
, in
, out
,
408 table
->name
, private, e
);
410 /* Standard target? */
411 if (!t
->u
.kernel
.target
->target
) {
414 v
= ((struct ip6t_standard_target
*)t
)->verdict
;
416 /* Pop from stack? */
417 if (v
!= IP6T_RETURN
) {
418 verdict
= (unsigned)(-v
) - 1;
422 back
= get_entry(table_base
,
426 if (table_base
+ v
!= (void *)e
+ e
->next_offset
427 && !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
428 /* Save old back ptr in next entry */
429 struct ip6t_entry
*next
430 = (void *)e
+ e
->next_offset
;
432 = (void *)back
- table_base
;
433 /* set back pointer to next entry */
437 e
= get_entry(table_base
, v
);
439 /* Targets which reenter must return
441 #ifdef CONFIG_NETFILTER_DEBUG
442 ((struct ip6t_entry
*)table_base
)->comefrom
445 verdict
= t
->u
.kernel
.target
->target(skb
,
451 #ifdef CONFIG_NETFILTER_DEBUG
452 if (((struct ip6t_entry
*)table_base
)->comefrom
454 && verdict
== IP6T_CONTINUE
) {
455 printk("Target %s reentered!\n",
456 t
->u
.kernel
.target
->name
);
459 ((struct ip6t_entry
*)table_base
)->comefrom
462 if (verdict
== IP6T_CONTINUE
)
463 e
= (void *)e
+ e
->next_offset
;
471 e
= (void *)e
+ e
->next_offset
;
475 #ifdef CONFIG_NETFILTER_DEBUG
476 ((struct ip6t_entry
*)table_base
)->comefrom
= NETFILTER_LINK_POISON
;
478 read_unlock_bh(&table
->lock
);
480 #ifdef DEBUG_ALLOW_ALL
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
492 mark_source_chains(struct xt_table_info
*newinfo
,
493 unsigned int valid_hooks
, void *entry0
)
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
500 unsigned int pos
= newinfo
->hook_entry
[hook
];
501 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
503 if (!(valid_hooks
& (1 << hook
)))
506 /* Set initial back pointer. */
507 e
->counters
.pcnt
= pos
;
510 struct ip6t_standard_target
*t
511 = (void *)ip6t_get_target(e
);
512 int visited
= e
->comefrom
& (1 << hook
);
514 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook
, pos
, e
->comefrom
);
519 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
521 /* Unconditional return/END. */
522 if ((e
->target_offset
== sizeof(struct ip6t_entry
)
523 && (strcmp(t
->target
.u
.user
.name
,
524 IP6T_STANDARD_TARGET
) == 0)
526 && unconditional(&e
->ipv6
)) || visited
) {
527 unsigned int oldpos
, size
;
529 if (t
->verdict
< -NF_MAX_VERDICT
- 1) {
530 duprintf("mark_source_chains: bad "
531 "negative verdict (%i)\n",
536 /* Return: backtrack through the last
539 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
540 #ifdef DEBUG_IP_FIREWALL_USER
542 & (1 << NF_INET_NUMHOOKS
)) {
543 duprintf("Back unset "
550 pos
= e
->counters
.pcnt
;
551 e
->counters
.pcnt
= 0;
553 /* We're at the start. */
557 e
= (struct ip6t_entry
*)
559 } while (oldpos
== pos
+ e
->next_offset
);
562 size
= e
->next_offset
;
563 e
= (struct ip6t_entry
*)
564 (entry0
+ pos
+ size
);
565 e
->counters
.pcnt
= pos
;
568 int newpos
= t
->verdict
;
570 if (strcmp(t
->target
.u
.user
.name
,
571 IP6T_STANDARD_TARGET
) == 0
573 if (newpos
> newinfo
->size
-
574 sizeof(struct ip6t_entry
)) {
575 duprintf("mark_source_chains: "
576 "bad verdict (%i)\n",
580 /* This a jump; chase it. */
581 duprintf("Jump rule %u -> %u\n",
584 /* ... this is a fallthru */
585 newpos
= pos
+ e
->next_offset
;
587 e
= (struct ip6t_entry
*)
589 e
->counters
.pcnt
= pos
;
594 duprintf("Finished chain %u\n", hook
);
600 cleanup_match(struct ip6t_entry_match
*m
, unsigned int *i
)
602 struct xt_mtdtor_param par
;
604 if (i
&& (*i
)-- == 0)
607 par
.match
= m
->u
.kernel
.match
;
608 par
.matchinfo
= m
->data
;
609 if (par
.match
->destroy
!= NULL
)
610 par
.match
->destroy(&par
);
611 module_put(par
.match
->me
);
616 check_entry(struct ip6t_entry
*e
, const char *name
)
618 struct ip6t_entry_target
*t
;
620 if (!ip6_checkentry(&e
->ipv6
)) {
621 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
625 if (e
->target_offset
+ sizeof(struct ip6t_entry_target
) >
629 t
= ip6t_get_target(e
);
630 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
636 static int check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
639 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
642 par
->match
= m
->u
.kernel
.match
;
643 par
->matchinfo
= m
->data
;
645 ret
= xt_check_match(par
, NFPROTO_IPV6
, m
->u
.match_size
- sizeof(*m
),
646 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
648 duprintf("ip_tables: check failed for `%s'.\n",
657 find_check_match(struct ip6t_entry_match
*m
, struct xt_mtchk_param
*par
,
660 struct xt_match
*match
;
663 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
665 "ip6t_%s", m
->u
.user
.name
);
666 if (IS_ERR(match
) || !match
) {
667 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
668 return match
? PTR_ERR(match
) : -ENOENT
;
670 m
->u
.kernel
.match
= match
;
672 ret
= check_match(m
, par
, i
);
678 module_put(m
->u
.kernel
.match
->me
);
682 static int check_target(struct ip6t_entry
*e
, const char *name
)
684 struct ip6t_entry_target
*t
;
685 struct xt_target
*target
;
688 t
= ip6t_get_target(e
);
689 target
= t
->u
.kernel
.target
;
690 ret
= xt_check_target(target
, AF_INET6
, t
->u
.target_size
- sizeof(*t
),
691 name
, e
->comefrom
, e
->ipv6
.proto
,
692 e
->ipv6
.invflags
& IP6T_INV_PROTO
, e
, t
->data
);
694 duprintf("ip_tables: check failed for `%s'.\n",
695 t
->u
.kernel
.target
->name
);
702 find_check_entry(struct ip6t_entry
*e
, const char *name
, unsigned int size
,
705 struct ip6t_entry_target
*t
;
706 struct xt_target
*target
;
709 struct xt_mtchk_param mtpar
;
711 ret
= check_entry(e
, name
);
717 mtpar
.entryinfo
= &e
->ipv6
;
718 mtpar
.hook_mask
= e
->comefrom
;
719 ret
= IP6T_MATCH_ITERATE(e
, find_check_match
, &mtpar
, &j
);
721 goto cleanup_matches
;
723 t
= ip6t_get_target(e
);
724 target
= try_then_request_module(xt_find_target(AF_INET6
,
727 "ip6t_%s", t
->u
.user
.name
);
728 if (IS_ERR(target
) || !target
) {
729 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
730 ret
= target
? PTR_ERR(target
) : -ENOENT
;
731 goto cleanup_matches
;
733 t
->u
.kernel
.target
= target
;
735 ret
= check_target(e
, name
);
742 module_put(t
->u
.kernel
.target
->me
);
744 IP6T_MATCH_ITERATE(e
, cleanup_match
, &j
);
749 check_entry_size_and_hooks(struct ip6t_entry
*e
,
750 struct xt_table_info
*newinfo
,
752 unsigned char *limit
,
753 const unsigned int *hook_entries
,
754 const unsigned int *underflows
,
759 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0
760 || (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
761 duprintf("Bad offset %p\n", e
);
766 < sizeof(struct ip6t_entry
) + sizeof(struct ip6t_entry_target
)) {
767 duprintf("checking: element %p size %u\n",
772 /* Check hooks & underflows */
773 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
774 if ((unsigned char *)e
- base
== hook_entries
[h
])
775 newinfo
->hook_entry
[h
] = hook_entries
[h
];
776 if ((unsigned char *)e
- base
== underflows
[h
])
777 newinfo
->underflow
[h
] = underflows
[h
];
780 /* FIXME: underflows must be unconditional, standard verdicts
781 < 0 (not IP6T_RETURN). --RR */
783 /* Clear counters and comefrom */
784 e
->counters
= ((struct xt_counters
) { 0, 0 });
792 cleanup_entry(struct ip6t_entry
*e
, unsigned int *i
)
794 struct ip6t_entry_target
*t
;
796 if (i
&& (*i
)-- == 0)
799 /* Cleanup all matches */
800 IP6T_MATCH_ITERATE(e
, cleanup_match
, NULL
);
801 t
= ip6t_get_target(e
);
802 if (t
->u
.kernel
.target
->destroy
)
803 t
->u
.kernel
.target
->destroy(t
->u
.kernel
.target
, t
->data
);
804 module_put(t
->u
.kernel
.target
->me
);
808 /* Checks and translates the user-supplied table segment (held in
811 translate_table(const char *name
,
812 unsigned int valid_hooks
,
813 struct xt_table_info
*newinfo
,
817 const unsigned int *hook_entries
,
818 const unsigned int *underflows
)
823 newinfo
->size
= size
;
824 newinfo
->number
= number
;
826 /* Init all hooks to impossible value. */
827 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
828 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
829 newinfo
->underflow
[i
] = 0xFFFFFFFF;
832 duprintf("translate_table: size %u\n", newinfo
->size
);
834 /* Walk through entries, checking offsets. */
835 ret
= IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
836 check_entry_size_and_hooks
,
840 hook_entries
, underflows
, &i
);
845 duprintf("translate_table: %u not %u entries\n",
850 /* Check hooks all assigned */
851 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
852 /* Only hooks which are valid */
853 if (!(valid_hooks
& (1 << i
)))
855 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
856 duprintf("Invalid hook entry %u %u\n",
860 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
861 duprintf("Invalid underflow %u %u\n",
867 if (!mark_source_chains(newinfo
, valid_hooks
, entry0
))
870 /* Finally, each sanity check must pass */
872 ret
= IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
873 find_check_entry
, name
, size
, &i
);
876 IP6T_ENTRY_ITERATE(entry0
, newinfo
->size
,
881 /* And one copy for every other CPU */
882 for_each_possible_cpu(i
) {
883 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
884 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
892 add_entry_to_counter(const struct ip6t_entry
*e
,
893 struct xt_counters total
[],
896 ADD_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
903 set_entry_to_counter(const struct ip6t_entry
*e
,
904 struct ip6t_counters total
[],
907 SET_COUNTER(total
[*i
], e
->counters
.bcnt
, e
->counters
.pcnt
);
914 get_counters(const struct xt_table_info
*t
,
915 struct xt_counters counters
[])
921 /* Instead of clearing (by a previous call to memset())
922 * the counters and using adds, we set the counters
923 * with data used by 'current' CPU
924 * We dont care about preemption here.
926 curcpu
= raw_smp_processor_id();
929 IP6T_ENTRY_ITERATE(t
->entries
[curcpu
],
931 set_entry_to_counter
,
935 for_each_possible_cpu(cpu
) {
939 IP6T_ENTRY_ITERATE(t
->entries
[cpu
],
941 add_entry_to_counter
,
947 static struct xt_counters
*alloc_counters(struct xt_table
*table
)
949 unsigned int countersize
;
950 struct xt_counters
*counters
;
951 const struct xt_table_info
*private = table
->private;
953 /* We need atomic snapshot of counters: rest doesn't change
954 (other than comefrom, which userspace doesn't care
956 countersize
= sizeof(struct xt_counters
) * private->number
;
957 counters
= vmalloc_node(countersize
, numa_node_id());
959 if (counters
== NULL
)
960 return ERR_PTR(-ENOMEM
);
962 /* First, sum counters... */
963 write_lock_bh(&table
->lock
);
964 get_counters(private, counters
);
965 write_unlock_bh(&table
->lock
);
971 copy_entries_to_user(unsigned int total_size
,
972 struct xt_table
*table
,
973 void __user
*userptr
)
975 unsigned int off
, num
;
976 struct ip6t_entry
*e
;
977 struct xt_counters
*counters
;
978 const struct xt_table_info
*private = table
->private;
980 const void *loc_cpu_entry
;
982 counters
= alloc_counters(table
);
983 if (IS_ERR(counters
))
984 return PTR_ERR(counters
);
986 /* choose the copy that is on our node/cpu, ...
987 * This choice is lazy (because current thread is
988 * allowed to migrate to another cpu)
990 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
991 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
996 /* FIXME: use iterator macros --RR */
997 /* ... then go back and fix counters and names */
998 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
1000 const struct ip6t_entry_match
*m
;
1001 const struct ip6t_entry_target
*t
;
1003 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
1004 if (copy_to_user(userptr
+ off
1005 + offsetof(struct ip6t_entry
, counters
),
1007 sizeof(counters
[num
])) != 0) {
1012 for (i
= sizeof(struct ip6t_entry
);
1013 i
< e
->target_offset
;
1014 i
+= m
->u
.match_size
) {
1017 if (copy_to_user(userptr
+ off
+ i
1018 + offsetof(struct ip6t_entry_match
,
1020 m
->u
.kernel
.match
->name
,
1021 strlen(m
->u
.kernel
.match
->name
)+1)
1028 t
= ip6t_get_target(e
);
1029 if (copy_to_user(userptr
+ off
+ e
->target_offset
1030 + offsetof(struct ip6t_entry_target
,
1032 t
->u
.kernel
.target
->name
,
1033 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1044 #ifdef CONFIG_COMPAT
1045 static void compat_standard_from_user(void *dst
, void *src
)
1047 int v
= *(compat_int_t
*)src
;
1050 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1051 memcpy(dst
, &v
, sizeof(v
));
1054 static int compat_standard_to_user(void __user
*dst
, void *src
)
1056 compat_int_t cv
= *(int *)src
;
1059 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1060 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1064 compat_calc_match(struct ip6t_entry_match
*m
, int *size
)
1066 *size
+= xt_compat_match_offset(m
->u
.kernel
.match
);
1070 static int compat_calc_entry(struct ip6t_entry
*e
,
1071 const struct xt_table_info
*info
,
1072 void *base
, struct xt_table_info
*newinfo
)
1074 struct ip6t_entry_target
*t
;
1075 unsigned int entry_offset
;
1078 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1079 entry_offset
= (void *)e
- base
;
1080 IP6T_MATCH_ITERATE(e
, compat_calc_match
, &off
);
1081 t
= ip6t_get_target(e
);
1082 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1083 newinfo
->size
-= off
;
1084 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1088 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1089 if (info
->hook_entry
[i
] &&
1090 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1091 newinfo
->hook_entry
[i
] -= off
;
1092 if (info
->underflow
[i
] &&
1093 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1094 newinfo
->underflow
[i
] -= off
;
1099 static int compat_table_info(const struct xt_table_info
*info
,
1100 struct xt_table_info
*newinfo
)
1102 void *loc_cpu_entry
;
1104 if (!newinfo
|| !info
)
1107 /* we dont care about newinfo->entries[] */
1108 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1109 newinfo
->initial_entries
= 0;
1110 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1111 return IP6T_ENTRY_ITERATE(loc_cpu_entry
, info
->size
,
1112 compat_calc_entry
, info
, loc_cpu_entry
,
1117 static int get_info(struct net
*net
, void __user
*user
, int *len
, int compat
)
1119 char name
[IP6T_TABLE_MAXNAMELEN
];
1123 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1124 duprintf("length %u != %zu\n", *len
,
1125 sizeof(struct ip6t_getinfo
));
1129 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1132 name
[IP6T_TABLE_MAXNAMELEN
-1] = '\0';
1133 #ifdef CONFIG_COMPAT
1135 xt_compat_lock(AF_INET6
);
1137 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1138 "ip6table_%s", name
);
1139 if (t
&& !IS_ERR(t
)) {
1140 struct ip6t_getinfo info
;
1141 const struct xt_table_info
*private = t
->private;
1143 #ifdef CONFIG_COMPAT
1145 struct xt_table_info tmp
;
1146 ret
= compat_table_info(private, &tmp
);
1147 xt_compat_flush_offsets(AF_INET6
);
1151 info
.valid_hooks
= t
->valid_hooks
;
1152 memcpy(info
.hook_entry
, private->hook_entry
,
1153 sizeof(info
.hook_entry
));
1154 memcpy(info
.underflow
, private->underflow
,
1155 sizeof(info
.underflow
));
1156 info
.num_entries
= private->number
;
1157 info
.size
= private->size
;
1158 strcpy(info
.name
, name
);
1160 if (copy_to_user(user
, &info
, *len
) != 0)
1168 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1169 #ifdef CONFIG_COMPAT
1171 xt_compat_unlock(AF_INET6
);
1177 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
, int *len
)
1180 struct ip6t_get_entries get
;
1183 if (*len
< sizeof(get
)) {
1184 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1187 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1189 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1190 duprintf("get_entries: %u != %zu\n",
1191 *len
, sizeof(get
) + get
.size
);
1195 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1196 if (t
&& !IS_ERR(t
)) {
1197 struct xt_table_info
*private = t
->private;
1198 duprintf("t->private->number = %u\n", private->number
);
1199 if (get
.size
== private->size
)
1200 ret
= copy_entries_to_user(private->size
,
1201 t
, uptr
->entrytable
);
1203 duprintf("get_entries: I've got %u not %u!\n",
1204 private->size
, get
.size
);
1210 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1216 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1217 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1218 void __user
*counters_ptr
)
1222 struct xt_table_info
*oldinfo
;
1223 struct xt_counters
*counters
;
1224 const void *loc_cpu_old_entry
;
1227 counters
= vmalloc_node(num_counters
* sizeof(struct xt_counters
),
1234 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1235 "ip6table_%s", name
);
1236 if (!t
|| IS_ERR(t
)) {
1237 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1238 goto free_newinfo_counters_untrans
;
1242 if (valid_hooks
!= t
->valid_hooks
) {
1243 duprintf("Valid hook crap: %08X vs %08X\n",
1244 valid_hooks
, t
->valid_hooks
);
1249 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1253 /* Update module usage count based on number of rules */
1254 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1255 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1256 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1257 (newinfo
->number
<= oldinfo
->initial_entries
))
1259 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1260 (newinfo
->number
<= oldinfo
->initial_entries
))
1263 /* Get the old counters. */
1264 get_counters(oldinfo
, counters
);
1265 /* Decrease module usage counts and free resource */
1266 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1267 IP6T_ENTRY_ITERATE(loc_cpu_old_entry
, oldinfo
->size
, cleanup_entry
,
1269 xt_free_table_info(oldinfo
);
1270 if (copy_to_user(counters_ptr
, counters
,
1271 sizeof(struct xt_counters
) * num_counters
) != 0)
1280 free_newinfo_counters_untrans
:
1287 do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1290 struct ip6t_replace tmp
;
1291 struct xt_table_info
*newinfo
;
1292 void *loc_cpu_entry
;
1294 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1297 /* overflow check */
1298 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1301 newinfo
= xt_alloc_table_info(tmp
.size
);
1305 /* choose the copy that is on our node/cpu */
1306 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1307 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1313 ret
= translate_table(tmp
.name
, tmp
.valid_hooks
,
1314 newinfo
, loc_cpu_entry
, tmp
.size
, tmp
.num_entries
,
1315 tmp
.hook_entry
, tmp
.underflow
);
1319 duprintf("ip_tables: Translated table\n");
1321 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1322 tmp
.num_counters
, tmp
.counters
);
1324 goto free_newinfo_untrans
;
1327 free_newinfo_untrans
:
1328 IP6T_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1330 xt_free_table_info(newinfo
);
1334 /* We're lazy, and add to the first CPU; overflow works its fey magic
1335 * and everything is OK. */
1337 add_counter_to_entry(struct ip6t_entry
*e
,
1338 const struct xt_counters addme
[],
1342 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1344 (long unsigned int)e
->counters
.pcnt
,
1345 (long unsigned int)e
->counters
.bcnt
,
1346 (long unsigned int)addme
[*i
].pcnt
,
1347 (long unsigned int)addme
[*i
].bcnt
);
1350 ADD_COUNTER(e
->counters
, addme
[*i
].bcnt
, addme
[*i
].pcnt
);
1357 do_add_counters(struct net
*net
, void __user
*user
, unsigned int len
,
1361 struct xt_counters_info tmp
;
1362 struct xt_counters
*paddc
;
1363 unsigned int num_counters
;
1368 const struct xt_table_info
*private;
1370 const void *loc_cpu_entry
;
1371 #ifdef CONFIG_COMPAT
1372 struct compat_xt_counters_info compat_tmp
;
1376 size
= sizeof(struct compat_xt_counters_info
);
1381 size
= sizeof(struct xt_counters_info
);
1384 if (copy_from_user(ptmp
, user
, size
) != 0)
1387 #ifdef CONFIG_COMPAT
1389 num_counters
= compat_tmp
.num_counters
;
1390 name
= compat_tmp
.name
;
1394 num_counters
= tmp
.num_counters
;
1398 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1401 paddc
= vmalloc_node(len
- size
, numa_node_id());
1405 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1410 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1411 if (!t
|| IS_ERR(t
)) {
1412 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1416 write_lock_bh(&t
->lock
);
1417 private = t
->private;
1418 if (private->number
!= num_counters
) {
1420 goto unlock_up_free
;
1424 /* Choose the copy that is on our node */
1425 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1426 IP6T_ENTRY_ITERATE(loc_cpu_entry
,
1428 add_counter_to_entry
,
1432 write_unlock_bh(&t
->lock
);
1441 #ifdef CONFIG_COMPAT
1442 struct compat_ip6t_replace
{
1443 char name
[IP6T_TABLE_MAXNAMELEN
];
1447 u32 hook_entry
[NF_INET_NUMHOOKS
];
1448 u32 underflow
[NF_INET_NUMHOOKS
];
1450 compat_uptr_t counters
; /* struct ip6t_counters * */
1451 struct compat_ip6t_entry entries
[0];
1455 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1456 unsigned int *size
, struct xt_counters
*counters
,
1459 struct ip6t_entry_target
*t
;
1460 struct compat_ip6t_entry __user
*ce
;
1461 u_int16_t target_offset
, next_offset
;
1462 compat_uint_t origsize
;
1467 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1468 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)))
1471 if (copy_to_user(&ce
->counters
, &counters
[*i
], sizeof(counters
[*i
])))
1474 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1475 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1477 ret
= IP6T_MATCH_ITERATE(e
, xt_compat_match_to_user
, dstptr
, size
);
1478 target_offset
= e
->target_offset
- (origsize
- *size
);
1481 t
= ip6t_get_target(e
);
1482 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1486 next_offset
= e
->next_offset
- (origsize
- *size
);
1487 if (put_user(target_offset
, &ce
->target_offset
))
1489 if (put_user(next_offset
, &ce
->next_offset
))
1499 compat_find_calc_match(struct ip6t_entry_match
*m
,
1501 const struct ip6t_ip6
*ipv6
,
1502 unsigned int hookmask
,
1503 int *size
, unsigned int *i
)
1505 struct xt_match
*match
;
1507 match
= try_then_request_module(xt_find_match(AF_INET6
, m
->u
.user
.name
,
1508 m
->u
.user
.revision
),
1509 "ip6t_%s", m
->u
.user
.name
);
1510 if (IS_ERR(match
) || !match
) {
1511 duprintf("compat_check_calc_match: `%s' not found\n",
1513 return match
? PTR_ERR(match
) : -ENOENT
;
1515 m
->u
.kernel
.match
= match
;
1516 *size
+= xt_compat_match_offset(match
);
1523 compat_release_match(struct ip6t_entry_match
*m
, unsigned int *i
)
1525 if (i
&& (*i
)-- == 0)
1528 module_put(m
->u
.kernel
.match
->me
);
1533 compat_release_entry(struct compat_ip6t_entry
*e
, unsigned int *i
)
1535 struct ip6t_entry_target
*t
;
1537 if (i
&& (*i
)-- == 0)
1540 /* Cleanup all matches */
1541 COMPAT_IP6T_MATCH_ITERATE(e
, compat_release_match
, NULL
);
1542 t
= compat_ip6t_get_target(e
);
1543 module_put(t
->u
.kernel
.target
->me
);
1548 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1549 struct xt_table_info
*newinfo
,
1551 unsigned char *base
,
1552 unsigned char *limit
,
1553 unsigned int *hook_entries
,
1554 unsigned int *underflows
,
1558 struct ip6t_entry_target
*t
;
1559 struct xt_target
*target
;
1560 unsigned int entry_offset
;
1564 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1565 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0
1566 || (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1567 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1571 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1572 sizeof(struct compat_xt_entry_target
)) {
1573 duprintf("checking: element %p size %u\n",
1578 /* For purposes of check_entry casting the compat entry is fine */
1579 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1583 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1584 entry_offset
= (void *)e
- (void *)base
;
1586 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, compat_find_calc_match
, name
,
1587 &e
->ipv6
, e
->comefrom
, &off
, &j
);
1589 goto release_matches
;
1591 t
= compat_ip6t_get_target(e
);
1592 target
= try_then_request_module(xt_find_target(AF_INET6
,
1594 t
->u
.user
.revision
),
1595 "ip6t_%s", t
->u
.user
.name
);
1596 if (IS_ERR(target
) || !target
) {
1597 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1599 ret
= target
? PTR_ERR(target
) : -ENOENT
;
1600 goto release_matches
;
1602 t
->u
.kernel
.target
= target
;
1604 off
+= xt_compat_target_offset(target
);
1606 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1610 /* Check hooks & underflows */
1611 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1612 if ((unsigned char *)e
- base
== hook_entries
[h
])
1613 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1614 if ((unsigned char *)e
- base
== underflows
[h
])
1615 newinfo
->underflow
[h
] = underflows
[h
];
1618 /* Clear counters and comefrom */
1619 memset(&e
->counters
, 0, sizeof(e
->counters
));
1626 module_put(t
->u
.kernel
.target
->me
);
1628 IP6T_MATCH_ITERATE(e
, compat_release_match
, &j
);
1633 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1634 unsigned int *size
, const char *name
,
1635 struct xt_table_info
*newinfo
, unsigned char *base
)
1637 struct ip6t_entry_target
*t
;
1638 struct xt_target
*target
;
1639 struct ip6t_entry
*de
;
1640 unsigned int origsize
;
1645 de
= (struct ip6t_entry
*)*dstptr
;
1646 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1647 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1649 *dstptr
+= sizeof(struct ip6t_entry
);
1650 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1652 ret
= COMPAT_IP6T_MATCH_ITERATE(e
, xt_compat_match_from_user
,
1656 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1657 t
= compat_ip6t_get_target(e
);
1658 target
= t
->u
.kernel
.target
;
1659 xt_compat_target_from_user(t
, dstptr
, size
);
1661 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1662 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1663 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1664 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1665 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1666 newinfo
->underflow
[h
] -= origsize
- *size
;
1671 static int compat_check_entry(struct ip6t_entry
*e
, const char *name
,
1676 struct xt_mtchk_param mtpar
;
1680 mtpar
.entryinfo
= &e
->ipv6
;
1681 mtpar
.hook_mask
= e
->comefrom
;
1682 ret
= IP6T_MATCH_ITERATE(e
, check_match
, &mtpar
, &j
);
1684 goto cleanup_matches
;
1686 ret
= check_target(e
, name
);
1688 goto cleanup_matches
;
1694 IP6T_MATCH_ITERATE(e
, cleanup_match
, &j
);
1699 translate_compat_table(const char *name
,
1700 unsigned int valid_hooks
,
1701 struct xt_table_info
**pinfo
,
1703 unsigned int total_size
,
1704 unsigned int number
,
1705 unsigned int *hook_entries
,
1706 unsigned int *underflows
)
1709 struct xt_table_info
*newinfo
, *info
;
1710 void *pos
, *entry0
, *entry1
;
1717 info
->number
= number
;
1719 /* Init all hooks to impossible value. */
1720 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1721 info
->hook_entry
[i
] = 0xFFFFFFFF;
1722 info
->underflow
[i
] = 0xFFFFFFFF;
1725 duprintf("translate_compat_table: size %u\n", info
->size
);
1727 xt_compat_lock(AF_INET6
);
1728 /* Walk through entries, checking offsets. */
1729 ret
= COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
,
1730 check_compat_entry_size_and_hooks
,
1731 info
, &size
, entry0
,
1732 entry0
+ total_size
,
1733 hook_entries
, underflows
, &j
, name
);
1739 duprintf("translate_compat_table: %u not %u entries\n",
1744 /* Check hooks all assigned */
1745 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1746 /* Only hooks which are valid */
1747 if (!(valid_hooks
& (1 << i
)))
1749 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1750 duprintf("Invalid hook entry %u %u\n",
1751 i
, hook_entries
[i
]);
1754 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1755 duprintf("Invalid underflow %u %u\n",
1762 newinfo
= xt_alloc_table_info(size
);
1766 newinfo
->number
= number
;
1767 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1768 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1769 newinfo
->underflow
[i
] = info
->underflow
[i
];
1771 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1774 ret
= COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
,
1775 compat_copy_entry_from_user
,
1776 &pos
, &size
, name
, newinfo
, entry1
);
1777 xt_compat_flush_offsets(AF_INET6
);
1778 xt_compat_unlock(AF_INET6
);
1783 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1787 ret
= IP6T_ENTRY_ITERATE(entry1
, newinfo
->size
, compat_check_entry
,
1791 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0
, newinfo
->size
, i
,
1792 compat_release_entry
, &j
);
1793 IP6T_ENTRY_ITERATE(entry1
, newinfo
->size
, cleanup_entry
, &i
);
1794 xt_free_table_info(newinfo
);
1798 /* And one copy for every other CPU */
1799 for_each_possible_cpu(i
)
1800 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1801 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1805 xt_free_table_info(info
);
1809 xt_free_table_info(newinfo
);
1811 COMPAT_IP6T_ENTRY_ITERATE(entry0
, total_size
, compat_release_entry
, &j
);
1814 xt_compat_flush_offsets(AF_INET6
);
1815 xt_compat_unlock(AF_INET6
);
1820 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1823 struct compat_ip6t_replace tmp
;
1824 struct xt_table_info
*newinfo
;
1825 void *loc_cpu_entry
;
1827 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1830 /* overflow check */
1831 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1833 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1836 newinfo
= xt_alloc_table_info(tmp
.size
);
1840 /* choose the copy that is on our node/cpu */
1841 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1842 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1848 ret
= translate_compat_table(tmp
.name
, tmp
.valid_hooks
,
1849 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1850 tmp
.num_entries
, tmp
.hook_entry
,
1855 duprintf("compat_do_replace: Translated table\n");
1857 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1858 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1860 goto free_newinfo_untrans
;
1863 free_newinfo_untrans
:
1864 IP6T_ENTRY_ITERATE(loc_cpu_entry
, newinfo
->size
, cleanup_entry
, NULL
);
1866 xt_free_table_info(newinfo
);
1871 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1876 if (!capable(CAP_NET_ADMIN
))
1880 case IP6T_SO_SET_REPLACE
:
1881 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1884 case IP6T_SO_SET_ADD_COUNTERS
:
1885 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1889 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1896 struct compat_ip6t_get_entries
{
1897 char name
[IP6T_TABLE_MAXNAMELEN
];
1899 struct compat_ip6t_entry entrytable
[0];
1903 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1904 void __user
*userptr
)
1906 struct xt_counters
*counters
;
1907 const struct xt_table_info
*private = table
->private;
1911 const void *loc_cpu_entry
;
1914 counters
= alloc_counters(table
);
1915 if (IS_ERR(counters
))
1916 return PTR_ERR(counters
);
1918 /* choose the copy that is on our node/cpu, ...
1919 * This choice is lazy (because current thread is
1920 * allowed to migrate to another cpu)
1922 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1925 ret
= IP6T_ENTRY_ITERATE(loc_cpu_entry
, total_size
,
1926 compat_copy_entry_to_user
,
1927 &pos
, &size
, counters
, &i
);
1934 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1938 struct compat_ip6t_get_entries get
;
1941 if (*len
< sizeof(get
)) {
1942 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1946 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1949 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1950 duprintf("compat_get_entries: %u != %zu\n",
1951 *len
, sizeof(get
) + get
.size
);
1955 xt_compat_lock(AF_INET6
);
1956 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1957 if (t
&& !IS_ERR(t
)) {
1958 const struct xt_table_info
*private = t
->private;
1959 struct xt_table_info info
;
1960 duprintf("t->private->number = %u\n", private->number
);
1961 ret
= compat_table_info(private, &info
);
1962 if (!ret
&& get
.size
== info
.size
) {
1963 ret
= compat_copy_entries_to_user(private->size
,
1964 t
, uptr
->entrytable
);
1966 duprintf("compat_get_entries: I've got %u not %u!\n",
1967 private->size
, get
.size
);
1970 xt_compat_flush_offsets(AF_INET6
);
1974 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1976 xt_compat_unlock(AF_INET6
);
1980 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1983 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1987 if (!capable(CAP_NET_ADMIN
))
1991 case IP6T_SO_GET_INFO
:
1992 ret
= get_info(sock_net(sk
), user
, len
, 1);
1994 case IP6T_SO_GET_ENTRIES
:
1995 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1998 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2005 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2009 if (!capable(CAP_NET_ADMIN
))
2013 case IP6T_SO_SET_REPLACE
:
2014 ret
= do_replace(sock_net(sk
), user
, len
);
2017 case IP6T_SO_SET_ADD_COUNTERS
:
2018 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2022 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2030 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2034 if (!capable(CAP_NET_ADMIN
))
2038 case IP6T_SO_GET_INFO
:
2039 ret
= get_info(sock_net(sk
), user
, len
, 0);
2042 case IP6T_SO_GET_ENTRIES
:
2043 ret
= get_entries(sock_net(sk
), user
, len
);
2046 case IP6T_SO_GET_REVISION_MATCH
:
2047 case IP6T_SO_GET_REVISION_TARGET
: {
2048 struct ip6t_get_revision rev
;
2051 if (*len
!= sizeof(rev
)) {
2055 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2060 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2065 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2068 "ip6t_%s", rev
.name
);
2073 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2080 struct xt_table
*ip6t_register_table(struct net
*net
, struct xt_table
*table
,
2081 const struct ip6t_replace
*repl
)
2084 struct xt_table_info
*newinfo
;
2085 struct xt_table_info bootstrap
2086 = { 0, 0, 0, { 0 }, { 0 }, { } };
2087 void *loc_cpu_entry
;
2088 struct xt_table
*new_table
;
2090 newinfo
= xt_alloc_table_info(repl
->size
);
2096 /* choose the copy on our node/cpu, but dont care about preemption */
2097 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2098 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2100 ret
= translate_table(table
->name
, table
->valid_hooks
,
2101 newinfo
, loc_cpu_entry
, repl
->size
,
2108 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2109 if (IS_ERR(new_table
)) {
2110 ret
= PTR_ERR(new_table
);
2116 xt_free_table_info(newinfo
);
2118 return ERR_PTR(ret
);
2121 void ip6t_unregister_table(struct xt_table
*table
)
2123 struct xt_table_info
*private;
2124 void *loc_cpu_entry
;
2125 struct module
*table_owner
= table
->me
;
2127 private = xt_unregister_table(table
);
2129 /* Decrease module usage counts and free resources */
2130 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2131 IP6T_ENTRY_ITERATE(loc_cpu_entry
, private->size
, cleanup_entry
, NULL
);
2132 if (private->number
> private->initial_entries
)
2133 module_put(table_owner
);
2134 xt_free_table_info(private);
2137 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2139 icmp6_type_code_match(u_int8_t test_type
, u_int8_t min_code
, u_int8_t max_code
,
2140 u_int8_t type
, u_int8_t code
,
2143 return (type
== test_type
&& code
>= min_code
&& code
<= max_code
)
2148 icmp6_match(const struct sk_buff
*skb
, const struct xt_match_param
*par
)
2150 const struct icmp6hdr
*ic
;
2151 struct icmp6hdr _icmph
;
2152 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2154 /* Must not be a fragment. */
2155 if (par
->fragoff
!= 0)
2158 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2160 /* We've been asked to examine this packet, and we
2161 * can't. Hence, no choice but to drop.
2163 duprintf("Dropping evil ICMP tinygram.\n");
2164 *par
->hotdrop
= true;
2168 return icmp6_type_code_match(icmpinfo
->type
,
2171 ic
->icmp6_type
, ic
->icmp6_code
,
2172 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2175 /* Called when user tries to insert an entry of this type. */
2176 static bool icmp6_checkentry(const struct xt_mtchk_param
*par
)
2178 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2180 /* Must specify no unknown invflags */
2181 return !(icmpinfo
->invflags
& ~IP6T_ICMP_INV
);
2184 /* The built-in targets: standard (NULL) and error. */
2185 static struct xt_target ip6t_standard_target __read_mostly
= {
2186 .name
= IP6T_STANDARD_TARGET
,
2187 .targetsize
= sizeof(int),
2189 #ifdef CONFIG_COMPAT
2190 .compatsize
= sizeof(compat_int_t
),
2191 .compat_from_user
= compat_standard_from_user
,
2192 .compat_to_user
= compat_standard_to_user
,
2196 static struct xt_target ip6t_error_target __read_mostly
= {
2197 .name
= IP6T_ERROR_TARGET
,
2198 .target
= ip6t_error
,
2199 .targetsize
= IP6T_FUNCTION_MAXNAMELEN
,
2203 static struct nf_sockopt_ops ip6t_sockopts
= {
2205 .set_optmin
= IP6T_BASE_CTL
,
2206 .set_optmax
= IP6T_SO_SET_MAX
+1,
2207 .set
= do_ip6t_set_ctl
,
2208 #ifdef CONFIG_COMPAT
2209 .compat_set
= compat_do_ip6t_set_ctl
,
2211 .get_optmin
= IP6T_BASE_CTL
,
2212 .get_optmax
= IP6T_SO_GET_MAX
+1,
2213 .get
= do_ip6t_get_ctl
,
2214 #ifdef CONFIG_COMPAT
2215 .compat_get
= compat_do_ip6t_get_ctl
,
2217 .owner
= THIS_MODULE
,
2220 static struct xt_match icmp6_matchstruct __read_mostly
= {
2222 .match
= icmp6_match
,
2223 .matchsize
= sizeof(struct ip6t_icmp
),
2224 .checkentry
= icmp6_checkentry
,
2225 .proto
= IPPROTO_ICMPV6
,
2229 static int __net_init
ip6_tables_net_init(struct net
*net
)
2231 return xt_proto_init(net
, AF_INET6
);
2234 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2236 xt_proto_fini(net
, AF_INET6
);
2239 static struct pernet_operations ip6_tables_net_ops
= {
2240 .init
= ip6_tables_net_init
,
2241 .exit
= ip6_tables_net_exit
,
2244 static int __init
ip6_tables_init(void)
2248 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2252 /* Noone else will be downing sem now, so we won't sleep */
2253 ret
= xt_register_target(&ip6t_standard_target
);
2256 ret
= xt_register_target(&ip6t_error_target
);
2259 ret
= xt_register_match(&icmp6_matchstruct
);
2263 /* Register setsockopt */
2264 ret
= nf_register_sockopt(&ip6t_sockopts
);
2268 printk(KERN_INFO
"ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2272 xt_unregister_match(&icmp6_matchstruct
);
2274 xt_unregister_target(&ip6t_error_target
);
2276 xt_unregister_target(&ip6t_standard_target
);
2278 unregister_pernet_subsys(&ip6_tables_net_ops
);
2283 static void __exit
ip6_tables_fini(void)
2285 nf_unregister_sockopt(&ip6t_sockopts
);
2287 xt_unregister_match(&icmp6_matchstruct
);
2288 xt_unregister_target(&ip6t_error_target
);
2289 xt_unregister_target(&ip6t_standard_target
);
2291 unregister_pernet_subsys(&ip6_tables_net_ops
);
2295 * find the offset to specified header or the protocol number of last header
2296 * if target < 0. "last header" is transport protocol header, ESP, or
2299 * If target header is found, its offset is set in *offset and return protocol
2300 * number. Otherwise, return -1.
2302 * If the first fragment doesn't contain the final protocol header or
2303 * NEXTHDR_NONE it is considered invalid.
2305 * Note that non-1st fragment is special case that "the protocol number
2306 * of last header" is "next header" field in Fragment header. In this case,
2307 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2311 int ipv6_find_hdr(const struct sk_buff
*skb
, unsigned int *offset
,
2312 int target
, unsigned short *fragoff
)
2314 unsigned int start
= skb_network_offset(skb
) + sizeof(struct ipv6hdr
);
2315 u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
2316 unsigned int len
= skb
->len
- start
;
2321 while (nexthdr
!= target
) {
2322 struct ipv6_opt_hdr _hdr
, *hp
;
2323 unsigned int hdrlen
;
2325 if ((!ipv6_ext_hdr(nexthdr
)) || nexthdr
== NEXTHDR_NONE
) {
2331 hp
= skb_header_pointer(skb
, start
, sizeof(_hdr
), &_hdr
);
2334 if (nexthdr
== NEXTHDR_FRAGMENT
) {
2335 unsigned short _frag_off
;
2337 fp
= skb_header_pointer(skb
,
2338 start
+offsetof(struct frag_hdr
,
2345 _frag_off
= ntohs(*fp
) & ~0x7;
2348 ((!ipv6_ext_hdr(hp
->nexthdr
)) ||
2349 hp
->nexthdr
== NEXTHDR_NONE
)) {
2351 *fragoff
= _frag_off
;
2357 } else if (nexthdr
== NEXTHDR_AUTH
)
2358 hdrlen
= (hp
->hdrlen
+ 2) << 2;
2360 hdrlen
= ipv6_optlen(hp
);
2362 nexthdr
= hp
->nexthdr
;
2371 EXPORT_SYMBOL(ip6t_register_table
);
2372 EXPORT_SYMBOL(ip6t_unregister_table
);
2373 EXPORT_SYMBOL(ip6t_do_table
);
2374 EXPORT_SYMBOL(ip6t_ext_hdr
);
2375 EXPORT_SYMBOL(ipv6_find_hdr
);
2377 module_init(ip6_tables_init
);
2378 module_exit(ip6_tables_fini
);