/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format, ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format, ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)                                         \
do {                                                            \
        if (!(x))                                               \
                printk("IP_NF_ASSERT: %s:%s:%u\n",              \
                       __FUNCTION__, __FILE__, __LINE__);       \
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_entry() below.  */

/* Returns whether the packet matches the rule or not. */
static inline bool
ip_packet_match(const struct iphdr *ip,
                const char *indev,
                const char *outdev,
                const struct ipt_ip *ipinfo,
                int isfrag)
{
        size_t i;
        unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
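/*
 * FWINV folds a rule's inversion flag into a test: the raw mismatch
 * condition is XORed with the invert bit (normalized to 0/1), so a set
 * IPT_INV_* flag flips the sense of the corresponding check.
 */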

        if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
                  IPT_INV_SRCIP)
            || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
                     IPT_INV_DSTIP)) {
                dprintf("Source or dest mismatch.\n");

                dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->saddr),
                        NIPQUAD(ipinfo->smsk.s_addr),
                        NIPQUAD(ipinfo->src.s_addr),
                        ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
                dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
                        NIPQUAD(ip->daddr),
                        NIPQUAD(ipinfo->dmsk.s_addr),
                        NIPQUAD(ipinfo->dst.s_addr),
                        ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
                return false;
        }

        /* Look for ifname matches; this should unroll nicely. */
        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)indev)[i]
                        ^ ((const unsigned long *)ipinfo->iniface)[i])
                        & ((const unsigned long *)ipinfo->iniface_mask)[i];
        }
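        /* ret accumulates, one word at a time, the XOR of the actual and
         * expected interface names masked by iniface_mask; ret == 0 means
         * indev matches the (possibly wildcarded) name in the rule. */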

        if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
                dprintf("VIA in mismatch (%s vs %s).%s\n",
                        indev, ipinfo->iniface,
                        ipinfo->invflags&IPT_INV_VIA_IN ? " (INV)" : "");
                return false;
        }

        for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
                ret |= (((const unsigned long *)outdev)[i]
                        ^ ((const unsigned long *)ipinfo->outiface)[i])
                        & ((const unsigned long *)ipinfo->outiface_mask)[i];
        }

        if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
                dprintf("VIA out mismatch (%s vs %s).%s\n",
                        outdev, ipinfo->outiface,
                        ipinfo->invflags&IPT_INV_VIA_OUT ? " (INV)" : "");
                return false;
        }

        /* Check specific protocol */
        if (ipinfo->proto
            && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
                dprintf("Packet protocol %hi does not match %hi.%s\n",
                        ip->protocol, ipinfo->proto,
                        ipinfo->invflags&IPT_INV_PROTO ? " (INV)" : "");
                return false;
        }
        /* If we have a fragment rule but the packet is not a fragment
         * then we return false */
        if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
                dprintf("Fragment rule but not fragment.%s\n",
                        ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
                return false;
        }

        return true;
}
#undef FWINV

static inline bool
ip_checkentry(const struct ipt_ip *ip)
{
        if (ip->flags & ~IPT_F_MASK) {
                duprintf("Unknown flag bits set: %08X\n",
                         ip->flags & ~IPT_F_MASK);
                return false;
        }
        if (ip->invflags & ~IPT_INV_MASK) {
                duprintf("Unknown invflag bits set: %08X\n",
                         ip->invflags & ~IPT_INV_MASK);
                return false;
        }
        return true;
}

static unsigned int
ipt_error(struct sk_buff *skb,
          const struct net_device *in,
          const struct net_device *out,
          unsigned int hooknum,
          const struct xt_target *target,
          const void *targinfo)
{
        if (net_ratelimit())
                printk("ip_tables: error: `%s'\n", (char *)targinfo);

        return NF_DROP;
}

static inline
bool do_match(struct ipt_entry_match *m,
              const struct sk_buff *skb,
              const struct net_device *in,
              const struct net_device *out,
              int offset,
              bool *hotdrop)
{
        /* Stop iteration if it doesn't match */
        if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
                                      offset, ip_hdrlen(skb), hotdrop))
                return true;
        else
                return false;
}

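/* Table entries are addressed as byte offsets from the start of the
 * table blob; hook_entry[], underflow[] and jump verdicts all use this
 * representation, hence the pointer arithmetic below. */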
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
        return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
        unsigned int i;

        for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
                if (((__u32 *)ip)[i])
                        return 0;

        return 1;
}

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *hooknames[] = {
        [NF_INET_PRE_ROUTING]  = "PREROUTING",
        [NF_INET_LOCAL_IN]     = "INPUT",
        [NF_INET_FORWARD]      = "FORWARD",
        [NF_INET_LOCAL_OUT]    = "OUTPUT",
        [NF_INET_POST_ROUTING] = "POSTROUTING",
};

enum nf_ip_trace_comments {
        NF_IP_TRACE_COMMENT_RULE,
        NF_IP_TRACE_COMMENT_RETURN,
        NF_IP_TRACE_COMMENT_POLICY,
};

static const char *comments[] = {
        [NF_IP_TRACE_COMMENT_RULE]   = "rule",
        [NF_IP_TRACE_COMMENT_RETURN] = "return",
        [NF_IP_TRACE_COMMENT_POLICY] = "policy",
};

static struct nf_loginfo trace_loginfo = {
        .type = NF_LOG_TYPE_LOG,
        .u = {
                .log = {
                        .level = 4,
                        .logflags = NF_LOG_MASK,
                },
        },
};

static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
                      char *hookname, char **chainname,
                      char **comment, unsigned int *rulenum)
{
        struct ipt_standard_target *t = (void *)ipt_get_target(s);

        if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
                /* Head of user chain: ERROR target with chainname */
                *chainname = t->target.data;
                (*rulenum) = 0;
        } else if (s == e) {
                (*rulenum)++;

                if (s->target_offset == sizeof(struct ipt_entry)
                    && strcmp(t->target.u.kernel.target->name,
                              IPT_STANDARD_TARGET) == 0
                    && t->verdict < 0
                    && unconditional(&s->ip)) {
                        /* Tail of chains: STANDARD target (return/policy) */
                        *comment = *chainname == hookname
                                ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
                                : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
                }
                return 1;
        } else
                (*rulenum)++;

        return 0;
}

static void trace_packet(struct sk_buff *skb,
                         unsigned int hook,
                         const struct net_device *in,
                         const struct net_device *out,
                         char *tablename,
                         struct xt_table_info *private,
                         struct ipt_entry *e)
{
        void *table_base;
        struct ipt_entry *root;
        char *hookname, *chainname, *comment;
        unsigned int rulenum = 0;

        table_base = (void *)private->entries[smp_processor_id()];
        root = get_entry(table_base, private->hook_entry[hook]);

        hookname = chainname = (char *)hooknames[hook];
        comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

        IPT_ENTRY_ITERATE(root,
                          private->size - private->hook_entry[hook],
                          get_chainname_rulenum,
                          e, hookname, &chainname, &comment, &rulenum);

        nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
                      "TRACE: %s:%s:%s:%u ",
                      tablename, chainname, comment, rulenum);
}
#endif

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
             unsigned int hook,
             const struct net_device *in,
             const struct net_device *out,
             struct xt_table *table)
{
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
        u_int16_t offset;
        struct iphdr *ip;
        u_int16_t datalen;
        bool hotdrop = false;
        /* Initializing verdict to NF_DROP keeps gcc happy. */
        unsigned int verdict = NF_DROP;
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
        struct xt_table_info *private;

        /* Initialization */
        ip = ip_hdr(skb);
        datalen = skb->len - ip->ihl * 4;
        indev = in ? in->name : nulldevname;
        outdev = out ? out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
         * if it was a normal packet.  All other fragments are treated
         * normally, except that they will NEVER match rules that ask
         * things we don't know (i.e. the tcp syn flag or ports).  If the
         * rule is also a fragment-specific rule, non-fragments won't
         * match it. */
        offset = ntohs(ip->frag_off) & IP_OFFSET;

        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
        private = table->private;
        table_base = (void *)private->entries[smp_processor_id()];
        e = get_entry(table_base, private->hook_entry[hook]);

        /* For return from builtin chain */
        back = get_entry(table_base, private->underflow[hook]);
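        /* "back" is where a RETURN verdict resumes.  Each jump saves the
         * current back offset in the next entry's comefrom field, giving
         * a return stack threaded through the table entries themselves. */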

        do {
                IP_NF_ASSERT(e);
                IP_NF_ASSERT(back);
                if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
                        struct ipt_entry_target *t;

                        if (IPT_MATCH_ITERATE(e, do_match,
                                              skb, in, out,
                                              offset, &hotdrop) != 0)
                                goto no_match;

                        ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

                        t = ipt_get_target(e);
                        IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
                        /* The packet is traced: log it */
                        if (unlikely(skb->nf_trace))
                                trace_packet(skb, hook, in, out,
                                             table->name, private, e);
#endif
                        /* Standard target? */
                        if (!t->u.kernel.target->target) {
                                int v;

                                v = ((struct ipt_standard_target *)t)->verdict;
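                                /* Standard verdicts are encoded as
                                 * -verdict - 1 (e.g. NF_ACCEPT, which is 1,
                                 * is stored as -2); IPT_RETURN is a special
                                 * negative value, and a non-negative v is a
                                 * byte offset to jump to. */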
                                if (v < 0) {
                                        /* Pop from stack? */
                                        if (v != IPT_RETURN) {
                                                verdict = (unsigned)(-v) - 1;
                                                break;
                                        }
                                        e = back;
                                        back = get_entry(table_base,
                                                         back->comefrom);
                                        continue;
                                }
                                if (table_base + v != (void *)e + e->next_offset
                                    && !(e->ip.flags & IPT_F_GOTO)) {
                                        /* Save old back ptr in next entry */
                                        struct ipt_entry *next
                                                = (void *)e + e->next_offset;
                                        next->comefrom
                                                = (void *)back - table_base;
                                        /* set back pointer to next entry */
                                        back = next;
                                }

                                e = get_entry(table_base, v);
                        } else {
                                /* Targets which reenter must return
                                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0xeeeeeeec;
#endif
                                verdict = t->u.kernel.target->target(skb,
                                                                     in, out,
                                                                     hook,
                                                                     t->u.kernel.target,
                                                                     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
                                if (((struct ipt_entry *)table_base)->comefrom
                                    != 0xeeeeeeec
                                    && verdict == IPT_CONTINUE) {
                                        printk("Target %s reentered!\n",
                                               t->u.kernel.target->name);
                                        verdict = NF_DROP;
                                }
                                ((struct ipt_entry *)table_base)->comefrom
                                        = 0x57acc001;
#endif
                                /* Target might have changed stuff. */
                                ip = ip_hdr(skb);
                                datalen = skb->len - ip->ihl * 4;

                                if (verdict == IPT_CONTINUE)
                                        e = (void *)e + e->next_offset;
                                else
                                        /* Verdict */
                                        break;
                        }
                } else {

                no_match:
                        e = (void *)e + e->next_offset;
                }
        } while (!hotdrop);

        read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
#else
        if (hotdrop)
                return NF_DROP;
        else
                return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
{
        unsigned int hook;

        /* No recursion; use packet counter to save back ptrs (reset
           to 0 as we leave), and comefrom to save source hook bitmask */
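        /* comefrom does double duty here: bit "hook" records which hooks
         * can reach an entry, and bit NF_INET_NUMHOOKS marks entries on
         * the walk currently in progress (cleared when backtracking), so
         * meeting a marked entry again means a loop. */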
        for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
                unsigned int pos = newinfo->hook_entry[hook];
                struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

                if (!(valid_hooks & (1 << hook)))
                        continue;

                /* Set initial back pointer. */
                e->counters.pcnt = pos;

                for (;;) {
                        struct ipt_standard_target *t
                                = (void *)ipt_get_target(e);
                        int visited = e->comefrom & (1 << hook);

                        if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
                                printk("iptables: loop hook %u pos %u %08X.\n",
                                       hook, pos, e->comefrom);
                                return 0;
                        }
                        e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

                        /* Unconditional return/END. */
                        if ((e->target_offset == sizeof(struct ipt_entry)
                             && (strcmp(t->target.u.user.name,
                                        IPT_STANDARD_TARGET) == 0)
                             && t->verdict < 0
                             && unconditional(&e->ip)) || visited) {
                                unsigned int oldpos, size;

                                if (t->verdict < -NF_MAX_VERDICT - 1) {
                                        duprintf("mark_source_chains: bad "
                                                 "negative verdict (%i)\n",
                                                 t->verdict);
                                        return 0;
                                }

                                /* Return: backtrack through the last
                                   big jump. */
                                do {
                                        e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                                        if (e->comefrom
                                            & (1 << NF_INET_NUMHOOKS)) {
                                                duprintf("Back unset "
                                                         "on hook %u "
                                                         "rule %u\n",
                                                         hook, pos);
                                        }
#endif
                                        oldpos = pos;
                                        pos = e->counters.pcnt;
                                        e->counters.pcnt = 0;

                                        /* We're at the start. */
                                        if (pos == oldpos)
                                                goto next;

                                        e = (struct ipt_entry *)
                                                (entry0 + pos);
                                } while (oldpos == pos + e->next_offset);

                                /* Move along one */
                                size = e->next_offset;
                                e = (struct ipt_entry *)
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
                        } else {
                                int newpos = t->verdict;

                                if (strcmp(t->target.u.user.name,
                                           IPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
                                        if (newpos > newinfo->size -
                                                sizeof(struct ipt_entry)) {
                                                duprintf("mark_source_chains: "
                                                         "bad verdict (%i)\n",
                                                         newpos);
                                                return 0;
                                        }
                                        /* This is a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
                                }
                                e = (struct ipt_entry *)
                                        (entry0 + newpos);
                                e->counters.pcnt = pos;
                                pos = newpos;
                        }
                }
        next:
                duprintf("Finished chain %u\n", hook);
        }
        return 1;
}

static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
        if (i && (*i)-- == 0)
                return 1;

        if (m->u.kernel.match->destroy)
                m->u.kernel.match->destroy(m->u.kernel.match, m->data);
        module_put(m->u.kernel.match->me);
        return 0;
}

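/* Basic structural sanity for one entry: the ipt_ip part must be
 * well-formed, and the target must fit entirely between target_offset
 * and next_offset. */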
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
        struct ipt_entry_target *t;

        if (!ip_checkentry(&e->ip)) {
                duprintf("ip_tables: ip check failed %p %s.\n", e, name);
                return -EINVAL;
        }

        if (e->target_offset + sizeof(struct ipt_entry_target) >
            e->next_offset)
                return -EINVAL;

        t = ipt_get_target(e);
        if (e->target_offset + t->u.target_size > e->next_offset)
                return -EINVAL;

        return 0;
}

static inline int check_match(struct ipt_entry_match *m, const char *name,
                              const struct ipt_ip *ip,
                              unsigned int hookmask, unsigned int *i)
{
        struct xt_match *match;
        int ret;

        match = m->u.kernel.match;
        ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                             name, hookmask, ip->proto,
                             ip->invflags & IPT_INV_PROTO);
        if (!ret && m->u.kernel.match->checkentry
            && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                                              hookmask)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         m->u.kernel.match->name);
                ret = -EINVAL;
        }
        if (!ret)
                (*i)++;
        return ret;
}

static inline int
find_check_match(struct ipt_entry_match *m,
                 const char *name,
                 const struct ipt_ip *ip,
                 unsigned int hookmask,
                 unsigned int *i)
{
        struct xt_match *match;
        int ret;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                      m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("find_check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;

        ret = check_match(m, name, ip, hookmask, i);
        if (ret)
                goto err;

        return 0;
err:
        module_put(m->u.kernel.match->me);
        return ret;
}

static inline int check_target(struct ipt_entry *e, const char *name)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        int ret;

        t = ipt_get_target(e);
        target = t->u.kernel.target;
        ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                              name, e->comefrom, e->ip.proto,
                              e->ip.invflags & IPT_INV_PROTO);
        if (!ret && t->u.kernel.target->checkentry
            && !t->u.kernel.target->checkentry(name, e, target, t->data,
                                               e->comefrom)) {
                duprintf("ip_tables: check failed for `%s'.\n",
                         t->u.kernel.target->name);
                ret = -EINVAL;
        }
        return ret;
}

static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
                 unsigned int *i)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        int ret;
        unsigned int j;

        ret = check_entry(e, name);
        if (ret)
                return ret;

        j = 0;
        ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
                                e->comefrom, &j);
        if (ret != 0)
                goto cleanup_matches;

        t = ipt_get_target(e);
        target = try_then_request_module(xt_find_target(AF_INET,
                                                        t->u.user.name,
                                                        t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
                ret = target ? PTR_ERR(target) : -ENOENT;
                goto cleanup_matches;
        }
        t->u.kernel.target = target;

        ret = check_target(e, name);
        if (ret)
                goto err;

        (*i)++;
        return 0;
err:
        module_put(t->u.kernel.target->me);
cleanup_matches:
        IPT_MATCH_ITERATE(e, cleanup_match, &j);
        return ret;
}

static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
                           struct xt_table_info *newinfo,
                           unsigned char *base,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
                           const unsigned int *underflows,
                           unsigned int *i)
{
        unsigned int h;

        if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
            || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
                duprintf("Bad offset %p\n", e);
                return -EINVAL;
        }

        if (e->next_offset
            < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
                duprintf("checking: element %p size %u\n",
                         e, e->next_offset);
                return -EINVAL;
        }

        /* Check hooks & underflows */
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h])
                        newinfo->underflow[h] = underflows[h];
        }

        /* FIXME: underflows must be unconditional, standard verdicts
           < 0 (not IPT_RETURN). --RR */

        /* Clear counters and comefrom */
        e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;

        (*i)++;
        return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
        struct ipt_entry_target *t;

        if (i && (*i)-- == 0)
                return 1;

        /* Cleanup all matches */
        IPT_MATCH_ITERATE(e, cleanup_match, NULL);
        t = ipt_get_target(e);
        if (t->u.kernel.target->destroy)
                t->u.kernel.target->destroy(t->u.kernel.target, t->data);
        module_put(t->u.kernel.target->me);
        return 0;
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
                unsigned int valid_hooks,
                struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
                const unsigned int *hook_entries,
                const unsigned int *underflows)
{
        unsigned int i;
        int ret;

        newinfo->size = size;
        newinfo->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = 0xFFFFFFFF;
                newinfo->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_table: size %u\n", newinfo->size);
        i = 0;
        /* Walk through entries, checking offsets. */
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry_size_and_hooks,
                                newinfo,
                                entry0,
                                entry0 + size,
                                hook_entries, underflows, &i);
        if (ret != 0)
                return ret;

        if (i != number) {
                duprintf("translate_table: %u not %u entries\n",
                         i, number);
                return -EINVAL;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        return -EINVAL;
                }
                if (newinfo->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        return -EINVAL;
                }
        }

        if (!mark_source_chains(newinfo, valid_hooks, entry0))
                return -ELOOP;

        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                find_check_entry, name, size, &i);

        if (ret != 0) {
                IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                  cleanup_entry, &i);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }

        return ret;
}

/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
                     struct xt_counters total[],
                     unsigned int *i)
{
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
                     struct ipt_counters total[],
                     unsigned int *i)
{
        SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

        (*i)++;
        return 0;
}

static void
get_counters(const struct xt_table_info *t,
             struct xt_counters counters[])
{
        unsigned int cpu;
        unsigned int i;
        unsigned int curcpu;

        /* Instead of clearing (by a previous call to memset())
         * the counters and using adds, we set the counters
         * with data used by the 'current' CPU.
         * We don't care about preemption here.
         */
        curcpu = raw_smp_processor_id();

        i = 0;
        IPT_ENTRY_ITERATE(t->entries[curcpu],
                          t->size,
                          set_entry_to_counter,
                          counters,
                          &i);

        for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
                IPT_ENTRY_ITERATE(t->entries[cpu],
                                  t->size,
                                  add_entry_to_counter,
                                  counters,
                                  &i);
        }
}

static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
        unsigned int countersize;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;

        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
           about). */
        countersize = sizeof(struct xt_counters) * private->number;
        counters = vmalloc_node(countersize, numa_node_id());

        if (counters == NULL)
                return ERR_PTR(-ENOMEM);

        /* First, sum counters... */
        write_lock_bh(&table->lock);
        get_counters(private, counters);
        write_unlock_bh(&table->lock);

        return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
                     struct xt_table *table,
                     void __user *userptr)
{
        unsigned int off, num;
        struct ipt_entry *e;
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;

        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);

        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because the current thread is
         * allowed to migrate to another cpu)
         */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
        }

        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
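        /* The blob just copied out holds kernel state: per-entry counters
         * are per-cpu, and the match/target name fields contain kernel
         * pointers.  Patch each entry up with the summed counters and the
         * module names userspace expects. */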
        for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
                unsigned int i;
                struct ipt_entry_match *m;
                struct ipt_entry_target *t;

                e = (struct ipt_entry *)(loc_cpu_entry + off);
                if (copy_to_user(userptr + off
                                 + offsetof(struct ipt_entry, counters),
                                 &counters[num],
                                 sizeof(counters[num])) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }

                for (i = sizeof(struct ipt_entry);
                     i < e->target_offset;
                     i += m->u.match_size) {
                        m = (void *)e + i;

                        if (copy_to_user(userptr + off + i
                                         + offsetof(struct ipt_entry_match,
                                                    u.user.name),
                                         m->u.kernel.match->name,
                                         strlen(m->u.kernel.match->name)+1)
                            != 0) {
                                ret = -EFAULT;
                                goto free_counters;
                        }
                }

                t = ipt_get_target(e);
                if (copy_to_user(userptr + off + e->target_offset
                                 + offsetof(struct ipt_entry_target,
                                            u.user.name),
                                 t->u.kernel.target->name,
                                 strlen(t->u.kernel.target->name)+1) != 0) {
                        ret = -EFAULT;
                        goto free_counters;
                }
        }

free_counters:
        vfree(counters);
        return ret;
}

#ifdef CONFIG_COMPAT
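/* Positive standard verdicts are jump offsets into the table blob, and
 * compat and native entries have different sizes, so the offsets must
 * be translated in both directions via xt_compat_calc_jump(). */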
static void compat_standard_from_user(void *dst, void *src)
{
        int v = *(compat_int_t *)src;

        if (v > 0)
                v += xt_compat_calc_jump(AF_INET, v);
        memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
        compat_int_t cv = *(int *)src;

        if (cv > 0)
                cv -= xt_compat_calc_jump(AF_INET, cv);
        return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
        *size += xt_compat_match_offset(m->u.kernel.match);
        return 0;
}

static int compat_calc_entry(struct ipt_entry *e,
                             const struct xt_table_info *info,
                             void *base, struct xt_table_info *newinfo)
{
        struct ipt_entry_target *t;
        unsigned int entry_offset;
        int off, i, ret;

        off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
        entry_offset = (void *)e - base;
        IPT_MATCH_ITERATE(e, compat_calc_match, &off);
        t = ipt_get_target(e);
        off += xt_compat_target_offset(t->u.kernel.target);
        newinfo->size -= off;
        ret = xt_compat_add_offset(AF_INET, entry_offset, off);
        if (ret)
                return ret;

        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                if (info->hook_entry[i] &&
                    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
                        newinfo->hook_entry[i] -= off;
                if (info->underflow[i] &&
                    (e < (struct ipt_entry *)(base + info->underflow[i])))
                        newinfo->underflow[i] -= off;
        }
        return 0;
}

static int compat_table_info(const struct xt_table_info *info,
                             struct xt_table_info *newinfo)
{
        void *loc_cpu_entry;

        if (!newinfo || !info)
                return -EINVAL;

        /* we don't care about newinfo->entries[] */
        memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
        newinfo->initial_entries = 0;
        loc_cpu_entry = info->entries[raw_smp_processor_id()];
        return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
                                 compat_calc_entry, info, loc_cpu_entry,
                                 newinfo);
}
#endif

static int get_info(void __user *user, int *len, int compat)
{
        char name[IPT_TABLE_MAXNAMELEN];
        struct xt_table *t;
        int ret;

        if (*len != sizeof(struct ipt_getinfo)) {
                duprintf("length %u != %zu\n", *len,
                         sizeof(struct ipt_getinfo));
                return -EINVAL;
        }

        if (copy_from_user(name, user, sizeof(name)) != 0)
                return -EFAULT;

        name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
        if (compat)
                xt_compat_lock(AF_INET);
#endif
        t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                                    "iptable_%s", name);
        if (t && !IS_ERR(t)) {
                struct ipt_getinfo info;
                struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
                if (compat) {
                        struct xt_table_info tmp;
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET);
                        private = &tmp;
                }
#endif
                info.valid_hooks = t->valid_hooks;
                memcpy(info.hook_entry, private->hook_entry,
                       sizeof(info.hook_entry));
                memcpy(info.underflow, private->underflow,
                       sizeof(info.underflow));
                info.num_entries = private->number;
                info.size = private->size;
                strcpy(info.name, name);

                if (copy_to_user(user, &info, *len) != 0)
                        ret = -EFAULT;
                else
                        ret = 0;

                xt_table_unlock(t);
                module_put(t->me);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
        if (compat)
                xt_compat_unlock(AF_INET);
#endif
        return ret;
}

static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
        int ret;
        struct ipt_get_entries get;
        struct xt_table *t;

        if (*len < sizeof(get)) {
                duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
                return -EINVAL;
        }
        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;
        if (*len != sizeof(struct ipt_get_entries) + get.size) {
                duprintf("get_entries: %u != %zu\n",
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }

        t = xt_find_table_lock(AF_INET, get.name);
        if (t && !IS_ERR(t)) {
                struct xt_table_info *private = t->private;
                duprintf("t->private->number = %u\n", private->number);
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
                else {
                        duprintf("get_entries: I've got %u not %u!\n",
                                 private->size, get.size);
                        ret = -EINVAL;
                }
                module_put(t->me);
                xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;

        return ret;
}

static int
__do_replace(const char *name, unsigned int valid_hooks,
             struct xt_table_info *newinfo, unsigned int num_counters,
             void __user *counters_ptr)
{
        int ret;
        struct xt_table *t;
        struct xt_table_info *oldinfo;
        struct xt_counters *counters;
        void *loc_cpu_old_entry;

        ret = 0;
        counters = vmalloc(num_counters * sizeof(struct xt_counters));
        if (!counters) {
                ret = -ENOMEM;
                goto out;
        }

        t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                                    "iptable_%s", name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free_newinfo_counters_untrans;
        }

        /* You lied! */
        if (valid_hooks != t->valid_hooks) {
                duprintf("Valid hook crap: %08X vs %08X\n",
                         valid_hooks, t->valid_hooks);
                ret = -EINVAL;
                goto put_module;
        }

        oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
        if (!oldinfo)
                goto put_module;

        /* Update module usage count based on number of rules */
        duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
                 oldinfo->number, oldinfo->initial_entries, newinfo->number);
        if ((oldinfo->number > oldinfo->initial_entries) ||
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
        if ((oldinfo->number > oldinfo->initial_entries) &&
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);

        /* Get the old counters. */
        get_counters(oldinfo, counters);
        /* Decrease module usage counts and free resources */
        loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
                          NULL);
        xt_free_table_info(oldinfo);
        if (copy_to_user(counters_ptr, counters,
                         sizeof(struct xt_counters) * num_counters) != 0)
                ret = -EFAULT;
        vfree(counters);
        xt_table_unlock(t);
        return ret;

put_module:
        module_put(t->me);
        xt_table_unlock(t);
free_newinfo_counters_untrans:
        vfree(counters);
out:
        return ret;
}

static int
do_replace(void __user *user, unsigned int len)
{
        int ret;
        struct ipt_replace tmp;
        struct xt_table_info *newinfo;
        void *loc_cpu_entry;

        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;

        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;

        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy that is on our node/cpu */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }

        ret = translate_table(tmp.name, tmp.valid_hooks,
                              newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
                              tmp.hook_entry, tmp.underflow);
        if (ret != 0)
                goto free_newinfo;

        duprintf("ip_tables: Translated table\n");

        ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, tmp.counters);
        if (ret)
                goto free_newinfo_untrans;
        return 0;

free_newinfo_untrans:
        IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
}

/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
                     const struct xt_counters addme[],
                     unsigned int *i)
{
#if 0
        duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
                 *i,
                 (long unsigned int)e->counters.pcnt,
                 (long unsigned int)e->counters.bcnt,
                 (long unsigned int)addme[*i].pcnt,
                 (long unsigned int)addme[*i].bcnt);
#endif

        ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

        (*i)++;
        return 0;
}

static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
        unsigned int i;
        struct xt_counters_info tmp;
        struct xt_counters *paddc;
        unsigned int num_counters;
        char *name;
        int size;
        void *ptmp;
        struct xt_table *t;
        struct xt_table_info *private;
        int ret = 0;
        void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
        struct compat_xt_counters_info compat_tmp;

        if (compat) {
                ptmp = &compat_tmp;
                size = sizeof(struct compat_xt_counters_info);
        } else
#endif
        {
                ptmp = &tmp;
                size = sizeof(struct xt_counters_info);
        }
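
        /* From here on the compat and native paths share code: only the
         * request header (name, num_counters) differs in layout; the
         * xt_counters array itself is assumed identical in both ABIs. */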
        if (copy_from_user(ptmp, user, size) != 0)
                return -EFAULT;

#ifdef CONFIG_COMPAT
        if (compat) {
                num_counters = compat_tmp.num_counters;
                name = compat_tmp.name;
        } else
#endif
        {
                num_counters = tmp.num_counters;
                name = tmp.name;
        }

        if (len != size + num_counters * sizeof(struct xt_counters))
                return -EINVAL;

        paddc = vmalloc_node(len - size, numa_node_id());
        if (!paddc)
                return -ENOMEM;

        if (copy_from_user(paddc, user + size, len - size) != 0) {
                ret = -EFAULT;
                goto free;
        }

        t = xt_find_table_lock(AF_INET, name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
        }

        write_lock_bh(&t->lock);
        private = t->private;
        if (private->number != num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }

        i = 0;
        /* Choose the copy that is on our node */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry,
                          private->size,
                          add_counter_to_entry,
                          paddc,
                          &i);
unlock_up_free:
        write_unlock_bh(&t->lock);
        xt_table_unlock(t);
        module_put(t->me);
free:
        vfree(paddc);

        return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
        char            name[IPT_TABLE_MAXNAMELEN];
        u32             valid_hooks;
        u32             num_entries;
        u32             size;
        u32             hook_entry[NF_INET_NUMHOOKS];
        u32             underflow[NF_INET_NUMHOOKS];
        u32             num_counters;
        compat_uptr_t   counters;       /* struct ipt_counters * */
        struct compat_ipt_entry entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
                          compat_uint_t *size, struct xt_counters *counters,
                          unsigned int *i)
{
        struct ipt_entry_target *t;
        struct compat_ipt_entry __user *ce;
        u_int16_t target_offset, next_offset;
        compat_uint_t origsize;
        int ret;

        ret = -EFAULT;
        origsize = *size;
        ce = (struct compat_ipt_entry __user *)*dstptr;
        if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
                goto out;

        if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
                goto out;

        *dstptr += sizeof(struct compat_ipt_entry);
        *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

        ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
        target_offset = e->target_offset - (origsize - *size);
        if (ret)
                goto out;
        t = ipt_get_target(e);
        ret = xt_compat_target_to_user(t, dstptr, size);
        if (ret)
                goto out;
        ret = -EFAULT;
        next_offset = e->next_offset - (origsize - *size);
        if (put_user(target_offset, &ce->target_offset))
                goto out;
        if (put_user(next_offset, &ce->next_offset))
                goto out;

        (*i)++;
        return 0;
out:
        return ret;
}

static inline int
compat_find_calc_match(struct ipt_entry_match *m,
                       const char *name,
                       const struct ipt_ip *ip,
                       unsigned int hookmask,
                       int *size, int *i)
{
        struct xt_match *match;

        match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                      m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
                duprintf("compat_check_calc_match: `%s' not found\n",
                         m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;
        *size += xt_compat_match_offset(match);

        (*i)++;
        return 0;
}

static inline int
compat_release_match(struct ipt_entry_match *m, unsigned int *i)
{
        if (i && (*i)-- == 0)
                return 1;

        module_put(m->u.kernel.match->me);
        return 0;
}

static inline int
compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
{
        struct ipt_entry_target *t;

        if (i && (*i)-- == 0)
                return 1;

        /* Cleanup all matches */
        COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
        t = compat_ipt_get_target(e);
        module_put(t->u.kernel.target->me);
        return 0;
}

static inline int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
                                  struct xt_table_info *newinfo,
                                  unsigned int *size,
                                  unsigned char *base,
                                  unsigned char *limit,
                                  unsigned int *hook_entries,
                                  unsigned int *underflows,
                                  unsigned int *i,
                                  const char *name)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        unsigned int entry_offset;
        int ret, off, h, j;

        duprintf("check_compat_entry_size_and_hooks %p\n", e);
        if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
            || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
                duprintf("Bad offset %p, limit = %p\n", e, limit);
                return -EINVAL;
        }

        if (e->next_offset < sizeof(struct compat_ipt_entry) +
                             sizeof(struct compat_xt_entry_target)) {
                duprintf("checking: element %p size %u\n",
                         e, e->next_offset);
                return -EINVAL;
        }

        /* For purposes of check_entry casting the compat entry is fine */
        ret = check_entry((struct ipt_entry *)e, name);
        if (ret)
                return ret;

        off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
        entry_offset = (void *)e - (void *)base;
        j = 0;
        ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
                                       &e->ip, e->comefrom, &off, &j);
        if (ret != 0)
                goto release_matches;

        t = compat_ipt_get_target(e);
        target = try_then_request_module(xt_find_target(AF_INET,
                                                        t->u.user.name,
                                                        t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
                         t->u.user.name);
                ret = target ? PTR_ERR(target) : -ENOENT;
                goto release_matches;
        }
        t->u.kernel.target = target;

        off += xt_compat_target_offset(target);
        *size += off;
        ret = xt_compat_add_offset(AF_INET, entry_offset, off);
        if (ret)
                goto out;

        /* Check hooks & underflows */
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if ((unsigned char *)e - base == hook_entries[h])
                        newinfo->hook_entry[h] = hook_entries[h];
                if ((unsigned char *)e - base == underflows[h])
                        newinfo->underflow[h] = underflows[h];
        }

        /* Clear counters and comefrom */
        memset(&e->counters, 0, sizeof(e->counters));
        e->comefrom = 0;

        (*i)++;
        return 0;

out:
        module_put(t->u.kernel.target->me);
release_matches:
        COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, &j);
        return ret;
}

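/* Expand one compat entry into native layout at *dstptr, growing *size
 * by the per-entry delta and shifting any hook entry points and
 * underflows that sit past the expansion. */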
static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
                            unsigned int *size, const char *name,
                            struct xt_table_info *newinfo, unsigned char *base)
{
        struct ipt_entry_target *t;
        struct xt_target *target;
        struct ipt_entry *de;
        unsigned int origsize;
        int ret, h;

        ret = 0;
        origsize = *size;
        de = (struct ipt_entry *)*dstptr;
        memcpy(de, e, sizeof(struct ipt_entry));
        memcpy(&de->counters, &e->counters, sizeof(e->counters));

        *dstptr += sizeof(struct ipt_entry);
        *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

        ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
                                       dstptr, size);
        if (ret)
                return ret;
        de->target_offset = e->target_offset - (origsize - *size);
        t = compat_ipt_get_target(e);
        target = t->u.kernel.target;
        xt_compat_target_from_user(t, dstptr, size);

        de->next_offset = e->next_offset - (origsize - *size);
        for (h = 0; h < NF_INET_NUMHOOKS; h++) {
                if ((unsigned char *)de - base < newinfo->hook_entry[h])
                        newinfo->hook_entry[h] -= origsize - *size;
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }
        return ret;
}

static inline int compat_check_entry(struct ipt_entry *e, const char *name,
                                     unsigned int *i)
{
        int j, ret;

        j = 0;
        ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
                                e->comefrom, &j);
        if (ret)
                goto cleanup_matches;

        ret = check_target(e, name);
        if (ret)
                goto cleanup_matches;

        (*i)++;
        return 0;

cleanup_matches:
        IPT_MATCH_ITERATE(e, cleanup_match, &j);
        return ret;
}

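/* Two-pass conversion of a compat ruleset: pass one walks the compat
 * blob under the compat lock, validating entries and recording per-entry
 * size deltas; pass two copies everything into a freshly allocated
 * native-layout table, which is then checked like a native one. */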
static int
translate_compat_table(const char *name,
                       unsigned int valid_hooks,
                       struct xt_table_info **pinfo,
                       void **pentry0,
                       unsigned int total_size,
                       unsigned int number,
                       unsigned int *hook_entries,
                       unsigned int *underflows)
{
        unsigned int i, j;
        struct xt_table_info *newinfo, *info;
        void *pos, *entry0, *entry1;
        unsigned int size;
        int ret;

        info = *pinfo;
        entry0 = *pentry0;
        size = total_size;
        info->number = number;

        /* Init all hooks to impossible value. */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                info->hook_entry[i] = 0xFFFFFFFF;
                info->underflow[i] = 0xFFFFFFFF;
        }

        duprintf("translate_compat_table: size %u\n", info->size);
        j = 0;
        xt_compat_lock(AF_INET);
        /* Walk through entries, checking offsets. */
        ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
                                       check_compat_entry_size_and_hooks,
                                       info, &size, entry0,
                                       entry0 + total_size,
                                       hook_entries, underflows, &j, name);
        if (ret != 0)
                goto out_unlock;

        ret = -EINVAL;
        if (j != number) {
                duprintf("translate_compat_table: %u not %u entries\n",
                         j, number);
                goto out_unlock;
        }

        /* Check hooks all assigned */
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                /* Only hooks which are valid */
                if (!(valid_hooks & (1 << i)))
                        continue;
                if (info->hook_entry[i] == 0xFFFFFFFF) {
                        duprintf("Invalid hook entry %u %u\n",
                                 i, hook_entries[i]);
                        goto out_unlock;
                }
                if (info->underflow[i] == 0xFFFFFFFF) {
                        duprintf("Invalid underflow %u %u\n",
                                 i, underflows[i]);
                        goto out_unlock;
                }
        }

        ret = -ENOMEM;
        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                goto out_unlock;

        newinfo->number = number;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = info->hook_entry[i];
                newinfo->underflow[i] = info->underflow[i];
        }
        entry1 = newinfo->entries[raw_smp_processor_id()];
        pos = entry1;
        size = total_size;
        ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
                                       compat_copy_entry_from_user,
                                       &pos, &size, name, newinfo, entry1);
        xt_compat_flush_offsets(AF_INET);
        xt_compat_unlock(AF_INET);
        if (ret)
                goto free_newinfo;

        ret = -ELOOP;
        if (!mark_source_chains(newinfo, valid_hooks, entry1))
                goto free_newinfo;

        i = 0;
        ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
                                name, &i);
        if (ret) {
                j -= i;
                COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
                                                  compat_release_entry, &j);
                IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
                xt_free_table_info(newinfo);
                return ret;
        }

        /* And one copy for every other CPU */
        for_each_possible_cpu(i)
                if (newinfo->entries[i] && newinfo->entries[i] != entry1)
                        memcpy(newinfo->entries[i], entry1, newinfo->size);

        *pinfo = newinfo;
        *pentry0 = entry1;
        xt_free_table_info(info);
        return 0;

free_newinfo:
        xt_free_table_info(newinfo);
out:
        COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
        return ret;
out_unlock:
        xt_compat_flush_offsets(AF_INET);
        xt_compat_unlock(AF_INET);
        goto out;
}

static int
compat_do_replace(void __user *user, unsigned int len)
{
        int ret;
        struct compat_ipt_replace tmp;
        struct xt_table_info *newinfo;
        void *loc_cpu_entry;

        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;

        /* overflow check */
        if (tmp.size >= INT_MAX / num_possible_cpus())
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;

        newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy that is on our node/cpu */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                           tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }

        ret = translate_compat_table(tmp.name, tmp.valid_hooks,
                                     &newinfo, &loc_cpu_entry, tmp.size,
                                     tmp.num_entries, tmp.hook_entry,
                                     tmp.underflow);
        if (ret != 0)
                goto free_newinfo;

        duprintf("compat_do_replace: Translated table\n");

        ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
                           tmp.num_counters, compat_ptr(tmp.counters));
        if (ret)
                goto free_newinfo_untrans;
        return 0;

free_newinfo_untrans:
        IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
free_newinfo:
        xt_free_table_info(newinfo);
        return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
                      unsigned int len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_SET_REPLACE:
                ret = compat_do_replace(user, len);
                break;

        case IPT_SO_SET_ADD_COUNTERS:
                ret = do_add_counters(user, len, 1);
                break;

        default:
                duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

struct compat_ipt_get_entries {
        char name[IPT_TABLE_MAXNAMELEN];
        compat_uint_t size;
        struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
{
        struct xt_counters *counters;
        struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
        void *loc_cpu_entry;
        unsigned int i = 0;

        counters = alloc_counters(table);
        if (IS_ERR(counters))
                return PTR_ERR(counters);

        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because the current thread is
         * allowed to migrate to another cpu)
         */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        pos = userptr;
        size = total_size;
        ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
                                compat_copy_entry_to_user,
                                &pos, &size, counters, &i);

        vfree(counters);
        return ret;
}

static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
        int ret;
        struct compat_ipt_get_entries get;
        struct xt_table *t;

        if (*len < sizeof(get)) {
                duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
                return -EINVAL;
        }

        if (copy_from_user(&get, uptr, sizeof(get)) != 0)
                return -EFAULT;

        if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
                duprintf("compat_get_entries: %u != %zu\n",
                         *len, sizeof(get) + get.size);
                return -EINVAL;
        }

        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(AF_INET, get.name);
        if (t && !IS_ERR(t)) {
                struct xt_table_info *private = t->private;
                struct xt_table_info info;
                duprintf("t->private->number = %u\n", private->number);
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size) {
                        ret = compat_copy_entries_to_user(private->size,
                                                          t, uptr->entrytable);
                } else if (!ret) {
                        duprintf("compat_get_entries: I've got %u not %u!\n",
                                 private->size, get.size);
                        ret = -EINVAL;
                }
                xt_compat_flush_offsets(AF_INET);
                module_put(t->me);
                xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;

        xt_compat_unlock(AF_INET);
        return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_GET_INFO:
                ret = get_info(user, len, 1);
                break;
        case IPT_SO_GET_ENTRIES:
                ret = compat_get_entries(user, len);
                break;
        default:
                ret = do_ipt_get_ctl(sk, cmd, user, len);
        }
        return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_SET_REPLACE:
                ret = do_replace(user, len);
                break;

        case IPT_SO_SET_ADD_COUNTERS:
                ret = do_add_counters(user, len, 0);
                break;

        default:
                duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_GET_INFO:
                ret = get_info(user, len, 0);
                break;

        case IPT_SO_GET_ENTRIES:
                ret = get_entries(user, len);
                break;

        case IPT_SO_GET_REVISION_MATCH:
        case IPT_SO_GET_REVISION_TARGET: {
                struct ipt_get_revision rev;
                int target;

                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
                        ret = -EFAULT;
                        break;
                }

                if (cmd == IPT_SO_GET_REVISION_TARGET)
                        target = 1;
                else
                        target = 0;

                try_then_request_module(xt_find_revision(AF_INET, rev.name,
                                                         rev.revision,
                                                         target, &ret),
                                        "ipt_%s", rev.name);
                break;
        }

        default:
                duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}

int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
        int ret;
        struct xt_table_info *newinfo;
        struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;

        newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy on our node/cpu, but don't care about preemption */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        memcpy(loc_cpu_entry, repl->entries, repl->size);

        ret = translate_table(table->name, table->valid_hooks,
                              newinfo, loc_cpu_entry, repl->size,
                              repl->num_entries,
                              repl->hook_entry,
                              repl->underflow);
        if (ret != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

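        /* bootstrap is a zeroed placeholder table_info: it seeds the
         * registration and is immediately replaced by newinfo, so the
         * table is never visible without its real ruleset. */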
        ret = xt_register_table(table, &bootstrap, newinfo);
        if (ret != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

        return 0;
}

void ipt_unregister_table(struct xt_table *table)
{
        struct xt_table_info *private;
        void *loc_cpu_entry;

        private = xt_unregister_table(table);

        /* Decrease module usage counts and free resources */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
        xt_free_table_info(private);
}

/* Returns true if the type and code are matched by the range, false otherwise */
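/* test_type 0xFF is the wildcard: it matches any type and code before
 * the invert flag is applied. */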
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
                     u_int8_t type, u_int8_t code,
                     bool invert)
{
        return ((test_type == 0xFF) ||
                (type == test_type && code >= min_code && code <= max_code))
                ^ invert;
}

static bool
icmp_match(const struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
           const struct xt_match *match,
           const void *matchinfo,
           int offset,
           unsigned int protoff,
           bool *hotdrop)
{
        struct icmphdr _icmph, *ic;
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must not be a fragment. */
        if (offset)
                return false;

        ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
        if (ic == NULL) {
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
                 */
                duprintf("Dropping evil ICMP tinygram.\n");
                *hotdrop = true;
                return false;
        }

        return icmp_type_code_match(icmpinfo->type,
                                    icmpinfo->code[0],
                                    icmpinfo->code[1],
                                    ic->type, ic->code,
                                    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static bool
icmp_checkentry(const char *tablename,
                const void *entry,
                const struct xt_match *match,
                void *matchinfo,
                unsigned int hook_mask)
{
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must specify no unknown invflags */
        return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target __read_mostly = {
        .name           = IPT_STANDARD_TARGET,
        .targetsize     = sizeof(int),
        .family         = AF_INET,
#ifdef CONFIG_COMPAT
        .compatsize     = sizeof(compat_int_t),
        .compat_from_user = compat_standard_from_user,
        .compat_to_user = compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target __read_mostly = {
        .name           = IPT_ERROR_TARGET,
        .target         = ipt_error,
        .targetsize     = IPT_FUNCTION_MAXNAMELEN,
        .family         = AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
        .pf             = PF_INET,
        .set_optmin     = IPT_BASE_CTL,
        .set_optmax     = IPT_SO_SET_MAX+1,
        .set            = do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
        .compat_set     = compat_do_ipt_set_ctl,
#endif
        .get_optmin     = IPT_BASE_CTL,
        .get_optmax     = IPT_SO_GET_MAX+1,
        .get            = do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
        .compat_get     = compat_do_ipt_get_ctl,
#endif
        .owner          = THIS_MODULE,
};

static struct xt_match icmp_matchstruct __read_mostly = {
        .name           = "icmp",
        .match          = icmp_match,
        .matchsize      = sizeof(struct ipt_icmp),
        .checkentry     = icmp_checkentry,
        .proto          = IPPROTO_ICMP,
        .family         = AF_INET,
};

static int __init ip_tables_init(void)
{
        int ret;

        ret = xt_proto_init(AF_INET);
        if (ret < 0)
                goto err1;

        /* No one else will be downing sem now, so we won't sleep */
        ret = xt_register_target(&ipt_standard_target);
        if (ret < 0)
                goto err2;
        ret = xt_register_target(&ipt_error_target);
        if (ret < 0)
                goto err3;
        ret = xt_register_match(&icmp_matchstruct);
        if (ret < 0)
                goto err4;

        /* Register setsockopt */
        ret = nf_register_sockopt(&ipt_sockopts);
        if (ret < 0)
                goto err5;

        printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
        return 0;

err5:
        xt_unregister_match(&icmp_matchstruct);
err4:
        xt_unregister_target(&ipt_error_target);
err3:
        xt_unregister_target(&ipt_standard_target);
err2:
        xt_proto_fini(AF_INET);
err1:
        return ret;
}

static void __exit ip_tables_fini(void)
{
        nf_unregister_sockopt(&ipt_sockopts);

        xt_unregister_match(&icmp_matchstruct);
        xt_unregister_target(&ipt_error_target);
        xt_unregister_target(&ipt_standard_target);

        xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);