/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
 */
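/*
 * Example rule (illustrative only; adjust limit and ports to taste):
 * limit each source host to 16 parallel connections to port 80 with
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 16 -j REJECT
 */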
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNLIMIT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS	8U
#else
#define CONNLIMIT_LOCK_SLOTS	256U
#endif

#define CONNLIMIT_GC_MAX_NODES	8
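/*
 * Tracked hosts are spread over CONNLIMIT_SLOTS rb-trees; lock i in
 * xt_connlimit_locks guards every tree whose bucket satisfies
 * hash % CONNLIMIT_LOCK_SLOTS == i. A single tree walk garbage-collects
 * at most CONNLIMIT_GC_MAX_NODES empty nodes.
 */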
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
};
struct xt_connlimit_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	union nf_inet_addr addr; /* search key */
};
static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
struct xt_connlimit_data {
	struct rb_root climit_root[CONNLIMIT_SLOTS];
};
static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;
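/*
 * Hash an address into one of CONNLIMIT_SLOTS buckets. The random seed
 * (connlimit_rnd, drawn once at checkentry time) keeps remote hosts
 * from deliberately colliding many addresses into a single tree.
 */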
static inline unsigned int connlimit_iphash(__be32 addr)
{
	return jhash_1word((__force __u32)addr,
			    connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr)
{
	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
		       connlimit_rnd) % CONNLIMIT_SLOTS;
}
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}
static int
same_source(const union nf_inet_addr *addr,
	    const union nf_inet_addr *u3, u_int8_t family)
{
	if (family == NFPROTO_IPV4)
		return ntohl(addr->ip) - ntohl(u3->ip);

	return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
}
bool nf_conncount_add(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple)
{
	struct xt_connlimit_conn *conn;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	hlist_add_head(&conn->node, head);
	return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
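/*
 * Count the connections stored in @head that are still alive, pruning
 * entries whose conntrack is gone or already closed. *addit is cleared
 * when @tuple itself is found, so the caller knows not to re-add it.
 */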
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
				 const struct nf_conntrack_tuple *tuple,
				 const struct nf_conntrack_zone *zone,
				 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = true;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, zone, &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);
static void tree_nodes_free(struct rb_root *root,
			    struct xt_connlimit_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct xt_connlimit_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
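/*
 * Walk the rb-tree for this hash bucket. On a key match the node's
 * connection list is counted (and @tuple added); on the way down, up to
 * CONNLIMIT_GC_MAX_NODES nodes whose lists became empty are collected
 * and freed. Freeing rebalances the tree, so the walk restarts once
 * (no_gc prevents looping) before a new node is inserted.
 */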
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const struct nf_conntrack_tuple *tuple,
	   const union nf_inet_addr *addr,
	   u8 family, const struct nf_conntrack_zone *zone)
{
	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct xt_connlimit_rb *rbconn;
	struct xt_connlimit_conn *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);

		parent = *rbnode;
		diff = same_source(addr, &rbconn->addr, family);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
						    zone, &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!nf_conncount_add(&rbconn->hhead, tuple))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_node_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */
		goto restart;
	}

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(connlimit_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	rbconn->addr = *addr;

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}
static int count_them(struct net *net,
		      struct xt_connlimit_data *data,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr,
		      u_int8_t family,
		      const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	if (family == NFPROTO_IPV6)
		hash = connlimit_iphash6(addr);
	else
		hash = connlimit_iphash(addr->ip);
	root = &data->climit_root[hash];

	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	count = count_tree(net, root, tuple, addr, family, zone);

	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	return count;
}
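/*
 * Match entry point. The conntrack tuple comes from the conntrack entry
 * when one exists, otherwise it is parsed out of the packet; the source
 * (or, with XT_CONNLIMIT_DADDR set, destination) address is masked to
 * the configured prefix before the per-host count is taken.
 */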
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = xt_net(par);
	const struct xt_connlimit_info *info = par->matchinfo;
	union nf_inet_addr addr;
	struct nf_conntrack_tuple tuple;
	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int connections;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct != NULL) {
		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		zone = nf_ct_zone(ct);
	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				      xt_family(par), net, &tuple)) {
		goto hotdrop;
	}

	if (xt_family(par) == NFPROTO_IPV6) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		unsigned int i;

		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));

		for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
			addr.ip6[i] &= info->mask.ip6[i];
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
			  iph->daddr : iph->saddr;

		addr.ip &= info->mask.ip;
	}

	connections = count_them(net, info->data, tuple_ptr, &addr,
				 xt_family(par), zone);
	if (connections == 0)
		/* kmalloc failed, drop it entirely */
		goto hotdrop;

	return (connections > info->limit) ^
	       !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
	par->hotdrop = true;
	return false;
}
static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;
	int ret;

	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));

	ret = nf_ct_netns_get(par->net, par->family);
	if (ret < 0) {
		pr_info("cannot load conntrack support for address family %u\n",
			par->family);
		return ret;
	}

	/* init private data */
	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
	if (info->data == NULL) {
		nf_ct_netns_put(par->net, par->family);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		info->data->climit_root[i] = RB_ROOT;

	return 0;
}
void nf_conncount_cache_free(struct hlist_head *hhead)
{
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;

	hlist_for_each_entry_safe(conn, n, hhead, node)
		kmem_cache_free(connlimit_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);
static void destroy_tree(struct rb_root *r)
{
	struct xt_connlimit_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct xt_connlimit_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->hhead);

		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}
static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;

	nf_ct_netns_put(par->net, par->family);

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		destroy_tree(&info->data->climit_root[i]);

	kfree(info->data);
}
static struct xt_match connlimit_mt_reg __read_mostly = {
	.name       = "connlimit",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connlimit_mt_check,
	.match      = connlimit_mt,
	.matchsize  = sizeof(struct xt_connlimit_info),
	.usersize   = offsetof(struct xt_connlimit_info, data),
	.destroy    = connlimit_mt_destroy,
	.me         = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
	int ret, i;

	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
		spin_lock_init(&xt_connlimit_locks[i]);

	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
					   sizeof(struct xt_connlimit_conn),
					   0, 0, NULL);
	if (!connlimit_conn_cachep)
		return -ENOMEM;

	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
					   sizeof(struct xt_connlimit_rb),
					   0, 0, NULL);
	if (!connlimit_rb_cachep) {
		kmem_cache_destroy(connlimit_conn_cachep);
		return -ENOMEM;
	}

	ret = xt_register_match(&connlimit_mt_reg);
	if (ret != 0) {
		kmem_cache_destroy(connlimit_conn_cachep);
		kmem_cache_destroy(connlimit_rb_cachep);
	}
	return ret;
}
static void __exit connlimit_mt_exit(void)
{
	xt_unregister_match(&connlimit_mt_reg);
	kmem_cache_destroy(connlimit_conn_cachep);
	kmem_cache_destroy(connlimit_rb_cachep);
}
module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");