/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   connection tracking match module as well */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
 *	- new API and handling of conntrack/nat helpers
 *	- now capable of multiple expectations for one master
 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
 *	- add usage/reference counts to ip_conntrack_expect
 *	- export ip_conntrack[_expect]_{find_get,put} functions
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- add support for various sizes of conntrack structures.
 * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
 *	- restructure nf_conn (introduce nf_conn_help)
 *	- redesign 'features' the way they were originally intended
 * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
 *	- add support for L3 protocol module load on demand.
 *
 * Derived from net/ipv4/netfilter/ip_conntrack_core.c
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
/* This rwlock protects the main hash table, protocol/helper/expected
   registrations, conntrack timers */
#define ASSERT_READ_LOCK(x)
#define ASSERT_WRITE_LOCK(x)
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_protocol.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#define NF_CONNTRACK_VERSION	"0.5.0"

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif
DEFINE_RWLOCK(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);

void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
struct nf_conntrack_protocol **nf_ct_protos[PF_MAX] __read_mostly;
struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX] __read_mostly;
static LIST_HEAD(helpers);
unsigned int nf_conntrack_htable_size __read_mostly = 0;
int nf_conntrack_max __read_mostly;
struct list_head *nf_conntrack_hash __read_mostly;
struct nf_conn nf_conntrack_untracked;
unsigned int nf_ct_log_invalid __read_mostly;
static LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;

static unsigned int nf_conntrack_next_id;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);

DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
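
/*
 * Event caching works as follows: each CPU accumulates event bits for
 * the conntrack it is currently handling in its per-CPU ecache slot.
 * The cached events are flushed to the notifier chain when a different
 * conntrack is about to reuse the slot, or explicitly through
 * nf_ct_deliver_cached_events() below.
 */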
/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
static inline void
__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
{
	DEBUGP("ecache: delivering events for %p\n", ecache->ct);
	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
	    && ecache->events)
		atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
					   ecache->ct);

	ecache->events = 0;
	nf_ct_put(ecache->ct);
	ecache->ct = NULL;
}
/* Deliver all cached events for a particular conntrack. This is called
 * by code prior to async packet handling for freeing the skb */
void nf_ct_deliver_cached_events(const struct nf_conn *ct)
{
	struct nf_conntrack_ecache *ecache;

	local_bh_disable();
	ecache = &__get_cpu_var(nf_conntrack_ecache);
	if (ecache->ct == ct)
		__nf_ct_deliver_cached_events(ecache);
	local_bh_enable();
}
/* Deliver cached events for old pending events, if current conntrack != old */
void __nf_ct_event_cache_init(struct nf_conn *ct)
{
	struct nf_conntrack_ecache *ecache;

	/* take care of delivering potentially old events */
	ecache = &__get_cpu_var(nf_conntrack_ecache);
	BUG_ON(ecache->ct == ct);
	if (ecache->ct)
		__nf_ct_deliver_cached_events(ecache);

	/* initialize for this conntrack/packet */
	ecache->ct = ct;
	nf_conntrack_get(&ct->ct_general);
}
/* flush the event cache - touches other CPU's data and must not be called
 * while packets are still passing through the code */
static void nf_ct_event_cache_flush(void)
{
	struct nf_conntrack_ecache *ecache;
	int cpu;

	for_each_possible_cpu(cpu) {
		ecache = &per_cpu(nf_conntrack_ecache, cpu);
		if (ecache->ct)
			nf_ct_put(ecache->ct);
	}
}
#else
static inline void nf_ct_event_cache_flush(void) {}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
/*
 * This scheme offers various sizes of "struct nf_conn" depending on
 * features (helper, nat, ...)
 */

#define NF_CT_FEATURES_NAMELEN	256
static struct {
	/* name of slab cache. printed in /proc/slabinfo */
	char *name;

	/* size of slab cache */
	size_t size;

	/* slab cache pointer */
	kmem_cache_t *cachep;

	/* allocated slab cache + modules which use this slab cache */
	int use;

} nf_ct_cache[NF_CT_F_NUM];

/* protect members of nf_ct_cache except for "use" */
DEFINE_RWLOCK(nf_ct_cache_lock);

/* This avoids calling kmem_cache_create() with same name simultaneously */
static DEFINE_MUTEX(nf_ct_cache_mutex);
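
/*
 * Locking here is two-level: nf_ct_cache_mutex serializes the (rare)
 * creation and destruction of the slab caches themselves, while
 * nf_ct_cache_lock protects the nf_ct_cache[] members on the packet
 * hot path, where only read_lock_bh() is taken.
 */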
extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;

struct nf_conntrack_protocol *
__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
{
	if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
		return &nf_conntrack_generic_protocol;

	return nf_ct_protos[l3proto][protocol];
}
/* this is guaranteed to always return a valid protocol helper, since
 * it falls back to generic_protocol */
struct nf_conntrack_protocol *
nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
{
	struct nf_conntrack_protocol *p;

	preempt_disable();
	p = __nf_ct_proto_find(l3proto, protocol);
	if (!try_module_get(p->me))
		p = &nf_conntrack_generic_protocol;
	preempt_enable();

	return p;
}
void nf_ct_proto_put(struct nf_conntrack_protocol *p)
{
	module_put(p->me);
}
struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto)
{
	struct nf_conntrack_l3proto *p;

	preempt_disable();
	p = __nf_ct_l3proto_find(l3proto);
	if (!try_module_get(p->me))
		p = &nf_conntrack_generic_l3proto;
	preempt_enable();

	return p;
}
void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
{
	module_put(p->me);
}
int
nf_ct_l3proto_try_module_get(unsigned short l3proto)
{
	int ret;
	struct nf_conntrack_l3proto *p;

retry:	p = nf_ct_l3proto_find_get(l3proto);
	if (p == &nf_conntrack_generic_l3proto) {
		ret = request_module("nf_conntrack-%d", l3proto);
		if (!ret)
			goto retry;

		return -EPROTOTYPE;
	}

	return 0;
}
void nf_ct_l3proto_module_put(unsigned short l3proto)
{
	struct nf_conntrack_l3proto *p;

	preempt_disable();
	p = __nf_ct_l3proto_find(l3proto);
	module_put(p->me);
	preempt_enable();
}
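
/*
 * Illustrative pairing (a sketch, not code from this file): a caller
 * that needs IPv4 conntrack support pinned while it works would do
 *
 *	if (nf_ct_l3proto_try_module_get(PF_INET) < 0)
 *		return -ENOENT;	/* l3proto could not be loaded */
 *	...
 *	nf_ct_l3proto_module_put(PF_INET);
 *
 * so the L3 protocol module cannot be unloaded in between.
 */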
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;

	a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
		  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
	b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
		  (tuple->src.u.all << 16) | tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}
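
/*
 * The tuple is hashed in two rounds above: the L3 addresses are mixed
 * with the (l3num, protonum) pair, the L4 ids with each other, and the
 * two intermediate values are folded together with the per-boot random
 * seed, then reduced modulo the table size.
 */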
static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}
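
/*
 * nf_conntrack_register_cache() is reference counted: a second caller
 * asking for the same feature set simply reuses the existing slab, as
 * long as name and object size match, so e.g. several helpers can
 * share the NF_CT_F_HELP cache.
 */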
int nf_conntrack_register_cache(u_int32_t features, const char *name,
				size_t size)
{
	int ret = 0;
	char *cache_name;
	kmem_cache_t *cachep;

	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
	       features, name, size);

	if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
		DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
			features);
		return -EINVAL;
	}

	mutex_lock(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	/* e.g: multiple helpers are loaded */
	if (nf_ct_cache[features].use > 0) {
		DEBUGP("nf_conntrack_register_cache: already registered.\n");
		if ((!strncmp(nf_ct_cache[features].name, name,
			      NF_CT_FEATURES_NAMELEN))
		    && nf_ct_cache[features].size == size) {
			DEBUGP("nf_conntrack_register_cache: reusing.\n");
			nf_ct_cache[features].use++;
			ret = 0;
		} else
			ret = -EBUSY;

		write_unlock_bh(&nf_ct_cache_lock);
		mutex_unlock(&nf_ct_cache_mutex);
		return ret;
	}
	write_unlock_bh(&nf_ct_cache_lock);

	/*
	 * The memory space for name of slab cache must be alive until
	 * cache is destroyed.
	 */
	cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
	if (cache_name == NULL) {
		DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
		ret = -ENOMEM;
		goto out_up_mutex;
	}

	if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
						>= NF_CT_FEATURES_NAMELEN) {
		printk("nf_conntrack_register_cache: name too long\n");
		ret = -EINVAL;
		goto out_free_name;
	}

	cachep = kmem_cache_create(cache_name, size, 0, 0,
				   NULL, NULL);
	if (!cachep) {
		printk("nf_conntrack_register_cache: Can't create slab cache "
		       "for the features = 0x%x\n", features);
		ret = -ENOMEM;
		goto out_free_name;
	}

	write_lock_bh(&nf_ct_cache_lock);
	nf_ct_cache[features].use = 1;
	nf_ct_cache[features].size = size;
	nf_ct_cache[features].cachep = cachep;
	nf_ct_cache[features].name = cache_name;
	write_unlock_bh(&nf_ct_cache_lock);

	goto out_up_mutex;

out_free_name:
	kfree(cache_name);
out_up_mutex:
	mutex_unlock(&nf_ct_cache_mutex);
	return ret;
}
/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
	kmem_cache_t *cachep;
	char *name;

	/*
	 * This assures that kmem_cache_create() isn't called before destroying
	 * slab cache.
	 */
	DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
	mutex_lock(&nf_ct_cache_mutex);

	write_lock_bh(&nf_ct_cache_lock);
	if (--nf_ct_cache[features].use > 0) {
		write_unlock_bh(&nf_ct_cache_lock);
		mutex_unlock(&nf_ct_cache_mutex);
		return;
	}
	cachep = nf_ct_cache[features].cachep;
	name = nf_ct_cache[features].name;
	nf_ct_cache[features].cachep = NULL;
	nf_ct_cache[features].name = NULL;
	nf_ct_cache[features].size = 0;
	write_unlock_bh(&nf_ct_cache_lock);

	synchronize_net();

	kmem_cache_destroy(cachep);
	kfree(name);
	mutex_unlock(&nf_ct_cache_mutex);
}
int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_protocol *protocol)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return protocol->pkt_to_tuple(skb, dataoff, tuple);
}
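
/*
 * Tuple inversion produces the tuple a reply packet would carry: the
 * L3 and L4 handlers below swap source and destination, and the
 * direction bit is flipped so the reply tuplehash lands in the proper
 * hash chain.
 */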
int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_protocol *protocol)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return protocol->invert_tuple(inverse, orig);
}
static void
clean_from_lists(struct nf_conn *ct)
{
	DEBUGP("clean_from_lists(%p)\n", ct);
	ASSERT_WRITE_LOCK(&nf_conntrack_lock);
	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_protocol *proto;

	DEBUGP("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
	if (l3proto && l3proto->destroy)
		l3proto->destroy(ct);

	proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				   ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (proto && proto->destroy)
		proto->destroy(ct);

	if (nf_conntrack_destroyed)
		nf_conntrack_destroyed(ct);

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	unsigned int hash = hash_conntrack(tuple);

	ASSERT_READ_LOCK(&nf_conntrack_lock);
	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
		      const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}
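
/*
 * Note the reference counting contract above: on a hit the conntrack's
 * use count is bumped while still under the read lock, so the entry
 * remains valid after the lock is dropped and the caller must release
 * it with nf_ct_put() when done.
 */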
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	ct->id = ++nf_conntrack_next_id;
	list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
		 &nf_conntrack_hash[hash]);
	list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
		 &nf_conntrack_hash[repl_hash]);
}
void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(*pskb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction. Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns. But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	DEBUGP("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost race. */
	list_for_each_entry(h, &nf_conntrack_hash[hash], list)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	write_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, *pskb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&nf_conntrack_lock);

	return h != NULL;
}
/* There's a small race here where we may free a just-assured
   connection. Too bad: we're in trouble anyway. */
static int early_drop(struct list_head *chain)
{
	/* Traverse backwards: gives us oldest, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	int dropped = 0;

	read_lock_bh(&nf_conntrack_lock);
	list_for_each_entry_reverse(h, chain, list) {
		tmp = nf_ct_tuplehash_to_ctrack(h);
		if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
			ct = tmp;
			atomic_inc(&ct->ct_general.use);
			break;
		}
	}
	read_unlock_bh(&nf_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
static struct nf_conntrack_helper *
__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_helper *h;

	list_for_each_entry(h, &helpers, list) {
		if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask))
			return h;
	}
	return NULL;
}
struct nf_conntrack_helper *
nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_helper *helper;

	/* need nf_conntrack_lock to assure that helper exists until
	 * try_module_get() is called */
	read_lock_bh(&nf_conntrack_lock);

	helper = __nf_ct_helper_find(tuple);
	if (helper) {
		/* need to increase module usage count to assure helper will
		 * not go away while the caller is e.g. busy putting a
		 * conntrack in the hash that uses the helper */
		if (!try_module_get(helper->me))
			helper = NULL;
	}

	read_unlock_bh(&nf_conntrack_lock);

	return helper;
}
void nf_ct_helper_put(struct nf_conntrack_helper *helper)
{
	module_put(helper->me);
}
static struct nf_conn *
__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     const struct nf_conntrack_l3proto *l3proto)
{
	struct nf_conn *conntrack = NULL;
	u_int32_t features = 0;
	struct nf_conntrack_helper *helper;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max
	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		/* Try dropping from this hash chain. */
		if (!early_drop(&nf_conntrack_hash[hash])) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/* find features needed by this conntrack. */
	features = l3proto->get_features(orig);

	/* FIXME: protect helper list per RCU */
	read_lock_bh(&nf_conntrack_lock);
	helper = __nf_ct_helper_find(repl);
	if (helper)
		features |= NF_CT_F_HELP;
	read_unlock_bh(&nf_conntrack_lock);

	DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);

	read_lock_bh(&nf_ct_cache_lock);

	if (unlikely(!nf_ct_cache[features].use)) {
		DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
			features);
		goto out;
	}

	conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
		goto out;
	}

	memset(conntrack, 0, nf_ct_cache[features].size);
	conntrack->features = features;
	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->ct_general.destroy = destroy_conntrack;
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	init_timer(&conntrack->timeout);
	conntrack->timeout.data = (unsigned long)conntrack;
	conntrack->timeout.function = death_by_timeout;
	read_unlock_bh(&nf_ct_cache_lock);

	return conntrack;
out:
	read_unlock_bh(&nf_ct_cache_lock);
	atomic_dec(&nf_conntrack_count);
	return conntrack;
}
struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conntrack_l3proto *l3proto;

	l3proto = __nf_ct_l3proto_find(orig->src.l3num);
	return __nf_conntrack_alloc(orig, repl, l3proto);
}
void nf_conntrack_free(struct nf_conn *conntrack)
{
	u_int32_t features = conntrack->features;
	NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
	DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
	       conntrack);
	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
	atomic_dec(&nf_conntrack_count);
}
/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress. Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_protocol *protocol,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
		DEBUGP("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		DEBUGP("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!protocol->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		DEBUGP("init conntrack: can't track with proto module\n");
		return NULL;
	}

	write_lock_bh(&nf_conntrack_lock);
	exp = find_expectation(tuple);

	if (exp) {
		DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
			conntrack, exp);
		/* Welcome, Mr. Bond. We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		conntrack->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conn_help *help = nfct_help(conntrack);

		if (help)
			help->helper = __nf_ct_helper_find(&repl_tuple);
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

	write_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_conntrack_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_protocol *proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     proto)) {
		DEBUGP("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple, NULL);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
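
/*
 * ctinfo summary: original-direction packets are classified as
 * IP_CT_NEW, IP_CT_RELATED (expected) or IP_CT_ESTABLISHED (a reply
 * has already been seen); reply-direction packets are always tagged
 * IP_CT_ESTABLISHED + IP_CT_IS_REPLY.
 */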
unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_protocol *proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)? Ignore. */
	if ((*pskb)->nfct) {
		NF_CT_STAT_INC(ignore);
		return NF_ACCEPT;
	}

	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
		DEBUGP("not prepared to track yet or error occurred\n");
		return -ret;
	}

	proto = __nf_ct_proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells to the netfilter
	 * core what to do with the packet. */
	if (proto->error != NULL &&
	    (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC(error);
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT((*pskb)->nfct);

	ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		DEBUGP("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		NF_CT_STAT_INC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, *pskb);

	return ret;
}
int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	return nf_ct_invert_tuple(inverse, orig,
				  __nf_ct_l3proto_find(orig->src.l3num),
				  __nf_ct_proto_find(orig->src.l3num,
						     orig->dst.protonum));
}
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
	int ret;
	BUG_ON(me->timeout == 0);

	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
					  sizeof(struct nf_conn)
					  + sizeof(struct nf_conn_help)
					  + __alignof__(struct nf_conn_help));
	if (ret < 0) {
		printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n");
		return ret;
	}
	write_lock_bh(&nf_conntrack_lock);
	list_add(&me->list, &helpers);
	write_unlock_bh(&nf_conntrack_lock);

	return 0;
}
struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name)
{
	struct nf_conntrack_helper *h;

	list_for_each_entry(h, &helpers, list) {
		if (!strcmp(h->name, name))
			return h;
	}

	return NULL;
}
*i
,
1072 const struct nf_conntrack_helper
*me
)
1074 struct nf_conn
*ct
= nf_ct_tuplehash_to_ctrack(i
);
1075 struct nf_conn_help
*help
= nfct_help(ct
);
1077 if (help
&& help
->helper
== me
) {
1078 nf_conntrack_event(IPCT_HELPER
, ct
);
1079 help
->helper
= NULL
;
1083 void nf_conntrack_helper_unregister(struct nf_conntrack_helper
*me
)
1086 struct nf_conntrack_tuple_hash
*h
;
1087 struct nf_conntrack_expect
*exp
, *tmp
;
1089 /* Need write lock here, to delete helper. */
1090 write_lock_bh(&nf_conntrack_lock
);
1091 list_del(&me
->list
);
1093 /* Get rid of expectations */
1094 list_for_each_entry_safe(exp
, tmp
, &nf_conntrack_expect_list
, list
) {
1095 struct nf_conn_help
*help
= nfct_help(exp
->master
);
1096 if (help
->helper
== me
&& del_timer(&exp
->timeout
)) {
1097 nf_ct_unlink_expect(exp
);
1098 nf_conntrack_expect_put(exp
);
1102 /* Get rid of expecteds, set helpers to NULL. */
1103 list_for_each_entry(h
, &unconfirmed
, list
)
1105 for (i
= 0; i
< nf_conntrack_htable_size
; i
++) {
1106 list_for_each_entry(h
, &nf_conntrack_hash
[i
], list
)
1109 write_unlock_bh(&nf_conntrack_lock
);
1111 /* Someone could be still looking at the helper in a bh. */
/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		write_unlock_bh(&nf_conntrack_lock);
		return;
	}

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		/* Need del_timer for race avoidance (may already be dying). */
		if (del_timer(&ct->timeout)) {
			ct->timeout.expires = jiffies + extra_jiffies;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - (unsigned int)(skb->nh.raw - skb->data);
		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
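
/*
 * The 0x80000000 tests above fire once a per-direction counter crosses
 * the halfway mark of its 32-bit range; the IPCT_COUNTER_FILLING event
 * gives interested listeners a chance to pick the counters up before
 * they wrap.
 */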
#if defined(CONFIG_NF_CT_NETLINK) || \
    defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>
/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nfattr_failure:
	return -1;
}
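
/*
 * NFA_PUT() hides a "goto nfattr_failure" on buffer overrun, which is
 * why the error label above carries no other cleanup.
 */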
static const size_t cta_min_proto[CTA_PROTO_MAX] = {
	[CTA_PROTO_SRC_PORT-1]	= sizeof(u_int16_t),
	[CTA_PROTO_DST_PORT-1]	= sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
		return -EINVAL;

	if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
		return -EINVAL;

	t->src.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
	t->dst.u.tcp.port =
		*(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

	return 0;
}
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}
/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	list_for_each_entry(h, &unconfirmed, list) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			goto found;
	}
	write_unlock_bh(&nf_conntrack_lock);
	return NULL;

found:
	atomic_inc(&ct->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);
	return ct;
}
void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}
static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct list_head) * size));
}
void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	int i;

	ip_ct_attach = NULL;

	/* This makes sure all current packets have passed through
	   netfilter framework. Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	for (i = 0; i < NF_CT_F_NUM; i++) {
		if (nf_ct_cache[i].use == 0)
			continue;

		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
		nf_ct_cache[i].use = 1;
		nf_conntrack_unregister_cache(i);
	}
	kmem_cache_destroy(nf_conntrack_expect_cachep);
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);

	/* free l3proto protocol tables */
	for (i = 0; i < PF_MAX; i++)
		if (nf_ct_protos[i]) {
			kfree(nf_ct_protos[i]);
			nf_ct_protos[i] = NULL;
		}
}
static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
	struct list_head *hash;
	unsigned int i;

	*vmalloced = 0;
	hash = (void *)__get_free_pages(GFP_KERNEL,
					get_order(sizeof(struct list_head)
						  * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct list_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_LIST_HEAD(&hash[i]);

	return hash;
}
int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct list_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = alloc_hashtable(hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!list_empty(&nf_conntrack_hash[i])) {
			h = list_entry(nf_conntrack_hash[i].next,
				       struct nf_conntrack_tuple_hash, list);
			list_del(&h->list);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			list_add_tail(&h->list, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	free_conntrack_hash(old_hash, old_vmalloced, old_size);
	return 0;
}
module_param_call(hashsize, set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
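
/*
 * With the parameter wired up this way, the hash size can be tuned at
 * runtime, e.g. (illustrative; the exact path depends on the module
 * name):
 *
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which funnels through set_hashsize() above and rehashes all live
 * entries into the new table.
 */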
int __init nf_conntrack_init(void)
{
	unsigned int i;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct list_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 8192;
		if (nf_conntrack_htable_size < 16)
			nf_conntrack_htable_size = 16;
	}
	nf_conntrack_max = 8 * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
					    &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
					  sizeof(struct nf_conn));
	if (ret < 0) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL, NULL);
	if (!nf_conntrack_expect_cachep) {
		printk(KERN_ERR "Unable to create nf_expect slab cache\n");
		goto err_free_conntrack_slab;
	}

	/* Don't NEED lock here, but good form anyway. */
	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < PF_MAX; i++)
		nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
	write_unlock_bh(&nf_conntrack_lock);

	/* For use by REJECT target */
	ip_ct_attach = __nf_conntrack_attach;

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return 0;

err_free_conntrack_slab:
	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
err_free_hash:
	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
			    nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}