/*
 * Backported from upstream commit 5c789e131cbb ("netfilter:
 * nf_conncount: Add list lock and gc worker, and RCU for init tree search")
 *
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *		only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNCOUNT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNCOUNT_LOCK_SLOTS	8U
#else
#define CONNCOUNT_LOCK_SLOTS	256U
#endif

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
	struct rcu_head			rcu_head;
};
struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};
static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};
static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}
static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}
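
/* Append @tuple to the per-node connection list.  Returns
 * NF_CONNCOUNT_ADDED on success, NF_CONNCOUNT_SKIP if the list has
 * already been marked dead by the garbage collector (the caller must
 * insert a fresh tree node instead), or NF_CONNCOUNT_ERR on failure.
 */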
static enum nf_conncount_list_add
nf_conncount_add(struct nf_conncount_list *list,
		 const struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_tuple *conn;

	if (WARN_ON_ONCE(list->count > INT_MAX))
		return NF_CONNCOUNT_ERR;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return NF_CONNCOUNT_ERR;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	spin_lock(&list->list_lock);
	if (list->dead == true) {
		kmem_cache_free(conncount_conn_cachep, conn);
		spin_unlock(&list->list_lock);
		return NF_CONNCOUNT_SKIP;
	}
	list_add_tail(&conn->node, &list->head);
	list->count++;
	spin_unlock(&list->list_lock);
	return NF_CONNCOUNT_ADDED;
}
static void __conn_free(struct rcu_head *h)
{
	struct nf_conncount_tuple *conn;

	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
	kmem_cache_free(conncount_conn_cachep, conn);
}
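
/* Unlink @conn from @list under the list lock and free it after an
 * RCU grace period.  Returns true when this removal emptied the list,
 * so the caller knows the owning tree node is now a gc candidate.
 */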
static bool conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	bool free_entry = false;

	spin_lock(&list->list_lock);

	if (list->count == 0) {
		spin_unlock(&list->list_lock);
		return free_entry;
	}

	list->count--;
	list_del_rcu(&conn->node);
	if (list->count == 0)
		free_entry = true;

	spin_unlock(&list->list_lock);
	call_rcu(&conn->rcu_head, __conn_free);
	return free_entry;
}
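
/* Look up the conntrack entry for @conn.  If it no longer exists, the
 * stale list entry is evicted unless it might still belong to an
 * unconfirmed connection added by another cpu, in which case -EAGAIN
 * is returned and the caller retries on a later lookup.
 */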
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn, bool *free_entry)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	__s32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed. In this case, nf_conntrack_find()
	 * returns no result. Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		*free_entry = conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}
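
/* Walk the saved connections of one tree node: evict entries whose
 * conntrack is gone or already closed, and clear *addit when @tuple is
 * already present so the caller does not add it a second time.
 */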
static void nf_conncount_lookup(struct net *net,
				struct nf_conncount_list *list,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone,
				bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;
	bool free_entry = false;

	/* best effort only */
	*addit = tuple ? true : false;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (!tuple)
					continue;

				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					*addit = false;
			} else if (PTR_ERR(found) == -ENOENT)
				collect++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}
}
static void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 1;
	list->dead = false;
}
/* Return true if the list is empty */
static bool nf_conncount_gc_list(struct net *net,
				 struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool free_entry = false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT) {
				if (free_entry)
					return true;
				collected++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			if (conn_free(list, conn))
				return true;
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			return false;
	}
	return false;
}
static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}
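
/* Erase the rb-tree nodes collected for gc.  Only nodes whose list is
 * still empty are removed; the dead flag stops concurrent adders from
 * reusing a node that is about to be freed via RCU.  Called with the
 * per-slot tree lock held.
 */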
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (rbconn->list.count == 0 && rbconn->list.dead == false) {
			rbconn->list.dead = true;
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}
static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}
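
/* Slow path: take the per-slot tree lock, re-walk the tree (another
 * cpu may have inserted the node in the meantime), opportunistically
 * collect up to CONNCOUNT_GC_MAX_NODES empty nodes, and insert a new
 * node for @key if none was found.  Returns the resulting connection
 * count, or 0 (treated as hotdrop by callers) on failure.
 */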
static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    u8 keylen,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	bool node_found = false;

	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* unlikely: other cpu added node already */
			node_found = true;
			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				count = 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				count = rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				node_found = false;
			}
			break;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_node_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			schedule_gc_worker(data, hash);
	}

	if (node_found)
		goto out_unlock;

	/* expected case: match, insert new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
	return count;
}
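
/* Fast path: walk the rb-tree for the hashed slot without taking the
 * tree lock; readers are protected by RCU while nodes are only freed
 * via call_rcu().  Falls back to insert_tree() when a new tree node is
 * needed.
 */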
static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;
	u8 keylen = data->keylen;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;
		bool addit;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			/* same source network -> be counted! */
			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
					    &addit);

			if (!addit)
				return rbconn->list.count;

			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				return 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				return rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				break;
			}
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
}
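
/* Deferred gc: scan one pending tree under RCU, then take the tree
 * lock only to erase the nodes found empty.  If more trees still have
 * their pending bit set, reschedule the work item for the next one.
 */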
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
	root = &data->root[tree];

	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}
	rcu_read_unlock();

	spin_lock_bh(&nf_conncount_locks[tree]);

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
	}

	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}
/* Count and return number of conntrack entries in 'net' with particular 'key'.
 * If 'tuple' is not null, insert it into the accounting data structure.
 * Call with RCU read lock.
 */
unsigned int rpl_nf_conncount_count(struct net *net,
				    struct nf_conncount_data *data,
				    const u32 *key,
				    const struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(rpl_nf_conncount_count);
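
/* Allocate and initialise the per-user accounting state.  The key
 * length is given in bytes and must be a non-zero multiple of
 * sizeof(u32), no larger than MAX_KEYLEN words.
 */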
struct nf_conncount_data *rpl_nf_conncount_init(struct net *net, unsigned int family,
						unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(rpl_nf_conncount_init);
static void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}
void rpl_nf_conncount_destroy(struct net *net, unsigned int family,
			      struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(rpl_nf_conncount_destroy);
int rpl_nf_conncount_modinit(void)
{
	int i;

	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}
void rpl_nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}