net/netfilter/xt_connlimit.c
/*
 * netfilter module to limit the number of parallel tcp
 * connections per IP address.
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 *
 * based on ...
 *
 * Kernel module to match connection tracking information.
 * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
 */
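
/*
 * Typical usage, limiting each /24 source network to at most 16 parallel
 * connections to port 80:
 *
 *   iptables -A INPUT -p tcp --syn --dport 80 \
 *            -m connlimit --connlimit-above 16 --connlimit-mask 24 -j REJECT
 */
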
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_connlimit.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define CONNLIMIT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNLIMIT_LOCK_SLOTS	8U
#else
#define CONNLIMIT_LOCK_SLOTS	256U
#endif

#define CONNLIMIT_GC_MAX_NODES	8

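/*
 * Connections are stored per source (or destination) network in one of
 * CONNLIMIT_SLOTS rb-trees, selected by a jhash of the masked address.
 * Each tree is protected by one of CONNLIMIT_LOCK_SLOTS keyed spinlocks;
 * fewer lock slots are used when lockdep is enabled, presumably to keep
 * its lock tracking overhead manageable.  A tree walk frees up to
 * CONNLIMIT_GC_MAX_NODES empty nodes as a side effect.
 */
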
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
	struct hlist_node		node;
	struct nf_conntrack_tuple	tuple;
};

struct xt_connlimit_rb {
	struct rb_node node;
	struct hlist_head hhead; /* connections/hosts in same subnet */
	union nf_inet_addr addr; /* search key */
};

static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;

struct xt_connlimit_data {
	struct rb_root climit_root[CONNLIMIT_SLOTS];
};

static u_int32_t connlimit_rnd __read_mostly;
static struct kmem_cache *connlimit_rb_cachep __read_mostly;
static struct kmem_cache *connlimit_conn_cachep __read_mostly;

static inline unsigned int connlimit_iphash(__be32 addr)
{
	return jhash_1word((__force __u32)addr,
			   connlimit_rnd) % CONNLIMIT_SLOTS;
}

static inline unsigned int
connlimit_iphash6(const union nf_inet_addr *addr)
{
	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
		      connlimit_rnd) % CONNLIMIT_SLOTS;
}

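/*
 * A TCP conntrack entry in TIME_WAIT or CLOSE no longer represents a
 * live connection and is not counted against the limit.
 */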
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return 0;
}

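/*
 * rb-tree comparator: returns zero when both (already masked) addresses
 * fall in the same network, negative/positive to descend left/right.
 */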
static int
same_source(const union nf_inet_addr *addr,
	    const union nf_inet_addr *u3, u_int8_t family)
{
	if (family == NFPROTO_IPV4)
		return ntohl(addr->ip) - ntohl(u3->ip);

	return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
}

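/*
 * Record one more tracked connection on this node's list.  GFP_ATOMIC is
 * needed: we are called from the packet path with the slot lock held.
 */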
bool nf_conncount_add(struct hlist_head *head,
		      const struct nf_conntrack_tuple *tuple)
{
	struct xt_connlimit_conn *conn;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return false;
	conn->tuple = *tuple;
	hlist_add_head(&conn->node, head);
	return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

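/*
 * Walk the saved tuples, dropping entries whose conntrack entry has
 * disappeared or is already closed, and return the number that remain.
 * *addit is cleared when @tuple is already on the list, so the caller
 * knows not to add it a second time.
 */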
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
				 const struct nf_conntrack_tuple *tuple,
				 const struct nf_conntrack_zone *zone,
				 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;
	struct nf_conn *found_ct;
	unsigned int length = 0;

	*addit = true;

	/* check the saved connections */
	hlist_for_each_entry_safe(conn, n, head, node) {
		found = nf_conntrack_find_get(net, zone, &conn->tuple);
		if (found == NULL) {
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
			/*
			 * Just to be sure we have it only once in the list.
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			hlist_del(&conn->node);
			kmem_cache_free(connlimit_conn_cachep, conn);
			continue;
		}

		nf_ct_put(found_ct);
		length++;
	}

	return length;
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);

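/* erase the rb-tree nodes collected for garbage collection */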
static void tree_nodes_free(struct rb_root *root,
			    struct xt_connlimit_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct xt_connlimit_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		rb_erase(&rbconn->node, root);
		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}

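/*
 * Binary-search the tree for the masked address.  On a hit, count the
 * live connections on that node (adding @tuple unless it is already
 * there).  On a miss, free any empty nodes seen along the way, restart
 * the walk once if something was freed, and finally insert a fresh node.
 * A return value of 0 signals an allocation failure and makes the caller
 * drop the packet.
 */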
static unsigned int
count_tree(struct net *net, struct rb_root *root,
	   const struct nf_conntrack_tuple *tuple,
	   const union nf_inet_addr *addr,
	   u8 family, const struct nf_conntrack_zone *zone)
{
	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct xt_connlimit_rb *rbconn;
	struct xt_connlimit_conn *conn;
	unsigned int gc_count;
	bool no_gc = false;

 restart:
	gc_count = 0;
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		bool addit;

		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);

		parent = *rbnode;
		diff = same_source(addr, &rbconn->addr, family);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* same source network -> be counted! */
			unsigned int count;

			count = nf_conncount_lookup(net, &rbconn->hhead, tuple,
						    zone, &addit);

			tree_nodes_free(root, gc_nodes, gc_count);
			if (!addit)
				return count;

			if (!nf_conncount_add(&rbconn->hhead, tuple))
				return 0; /* hotdrop */

			return count + 1;
		}

		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		/* only used for GC on hhead, retval and 'addit' ignored */
		nf_conncount_lookup(net, &rbconn->hhead, tuple, zone, &addit);
		if (hlist_empty(&rbconn->hhead))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		no_gc = true;
		tree_nodes_free(root, gc_nodes, gc_count);
		/* Calling tree_nodes_free() before the new allocation permits
		 * the allocator to re-use the just-freed objects.
		 *
		 * This is a rare event; in most cases we will find an
		 * existing node to re-use (or gc_count is 0).
		 */
		goto restart;
	}

	/* no match, need to insert new node */
	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		return 0;

	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(connlimit_rb_cachep, rbconn);
		return 0;
	}

	conn->tuple = *tuple;
	rbconn->addr = *addr;

	INIT_HLIST_HEAD(&rbconn->hhead);
	hlist_add_head(&conn->node, &rbconn->hhead);

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
	return 1;
}

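/*
 * Pick the tree for this address and count under the matching keyed lock.
 * CONNLIMIT_SLOTS is a multiple of CONNLIMIT_LOCK_SLOTS, so a given tree
 * is always serialized by the same spinlock.
 */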
static int count_them(struct net *net,
		      struct xt_connlimit_data *data,
		      const struct nf_conntrack_tuple *tuple,
		      const union nf_inet_addr *addr,
		      u_int8_t family,
		      const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	int count;
	u32 hash;

	if (family == NFPROTO_IPV6)
		hash = connlimit_iphash6(addr);
	else
		hash = connlimit_iphash(addr->ip);
	root = &data->climit_root[hash];

	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	count = count_tree(net, root, tuple, addr, family, zone);

	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);

	return count;
}

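/*
 * Match entry point: take the original-direction tuple from conntrack (or
 * reconstruct one from the packet), mask the source or destination address
 * as configured, count the connections from that network and compare the
 * result against the limit, honouring XT_CONNLIMIT_INVERT.
 */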
static bool
connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = xt_net(par);
	const struct xt_connlimit_info *info = par->matchinfo;
	union nf_inet_addr addr;
	struct nf_conntrack_tuple tuple;
	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn *ct;
	unsigned int connections;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct != NULL) {
		tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		zone = nf_ct_zone(ct);
	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				      xt_family(par), net, &tuple)) {
		goto hotdrop;
	}

	if (xt_family(par) == NFPROTO_IPV6) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);
		unsigned int i;

		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));

		for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
			addr.ip6[i] &= info->mask.ip6[i];
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
			  iph->daddr : iph->saddr;

		addr.ip &= info->mask.ip;
	}

	connections = count_them(net, info->data, tuple_ptr, &addr,
				 xt_family(par), zone);
	if (connections == 0)
		/* kmalloc failed, drop it entirely */
		goto hotdrop;

	return (connections > info->limit) ^
	       !!(info->flags & XT_CONNLIMIT_INVERT);

 hotdrop:
	par->hotdrop = true;
	return false;
}

static int connlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;
	int ret;

	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));

	ret = nf_ct_netns_get(par->net, par->family);
	if (ret < 0) {
		pr_info("cannot load conntrack support for address family %u\n",
			par->family);
		return ret;
	}

	/* init private data */
	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
	if (info->data == NULL) {
		nf_ct_netns_put(par->net, par->family);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		info->data->climit_root[i] = RB_ROOT;

	return 0;
}

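/* free every list node hanging off an rb-tree node's hlist */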
void nf_conncount_cache_free(struct hlist_head *hhead)
{
	struct xt_connlimit_conn *conn;
	struct hlist_node *n;

	hlist_for_each_entry_safe(conn, n, hhead, node)
		kmem_cache_free(connlimit_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct xt_connlimit_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct xt_connlimit_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->hhead);

		kmem_cache_free(connlimit_rb_cachep, rbconn);
	}
}

static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_connlimit_info *info = par->matchinfo;
	unsigned int i;

	nf_ct_netns_put(par->net, par->family);

	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
		destroy_tree(&info->data->climit_root[i]);

	kfree(info->data);
}

static struct xt_match connlimit_mt_reg __read_mostly = {
	.name       = "connlimit",
	.revision   = 1,
	.family     = NFPROTO_UNSPEC,
	.checkentry = connlimit_mt_check,
	.match      = connlimit_mt,
	.matchsize  = sizeof(struct xt_connlimit_info),
	.usersize   = offsetof(struct xt_connlimit_info, data),
	.destroy    = connlimit_mt_destroy,
	.me         = THIS_MODULE,
};

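/*
 * The BUILD_BUG_ONs below enforce the slot/lock invariants: there can be
 * no more locks than trees, and the slot count must be an exact multiple
 * of the lock count so that hash % CONNLIMIT_LOCK_SLOTS always maps a
 * given tree to the same lock.
 */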
static int __init connlimit_mt_init(void)
{
	int ret, i;

	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
		spin_lock_init(&xt_connlimit_locks[i]);

	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
					   sizeof(struct xt_connlimit_conn),
					   0, 0, NULL);
	if (!connlimit_conn_cachep)
		return -ENOMEM;

	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
					   sizeof(struct xt_connlimit_rb),
					   0, 0, NULL);
	if (!connlimit_rb_cachep) {
		kmem_cache_destroy(connlimit_conn_cachep);
		return -ENOMEM;
	}
	ret = xt_register_match(&connlimit_mt_reg);
	if (ret != 0) {
		kmem_cache_destroy(connlimit_conn_cachep);
		kmem_cache_destroy(connlimit_rb_cachep);
	}
	return ret;
}

static void __exit connlimit_mt_exit(void)
{
	xt_unregister_match(&connlimit_mt_reg);
	kmem_cache_destroy(connlimit_conn_cachep);
	kmem_cache_destroy(connlimit_rb_cachep);
}

module_init(connlimit_mt_init);
module_exit(connlimit_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: Number of connections matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
MODULE_ALIAS("ip6t_connlimit");