/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/list.h>
15 #include <linux/rbtree.h>
16 #include <linux/netlink.h>
17 #include <linux/netfilter.h>
18 #include <linux/netfilter/nf_tables.h>
19 #include <net/netfilter/nf_tables.h>
25 struct delayed_work gc_work
;
28 struct nft_rbtree_elem
{
30 struct nft_set_ext ext
;
33 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem
*rbe
)
35 return nft_set_ext_exists(&rbe
->ext
, NFT_SET_EXT_FLAGS
) &&
36 (*nft_set_ext_flags(&rbe
->ext
) & NFT_SET_ELEM_INTERVAL_END
);
39 static bool nft_rbtree_equal(const struct nft_set
*set
, const void *this,
40 const struct nft_rbtree_elem
*interval
)
42 return memcmp(this, nft_set_ext_key(&interval
->ext
), set
->klen
) == 0;
45 static bool __nft_rbtree_lookup(const struct net
*net
, const struct nft_set
*set
,
46 const u32
*key
, const struct nft_set_ext
**ext
,
49 struct nft_rbtree
*priv
= nft_set_priv(set
);
50 const struct nft_rbtree_elem
*rbe
, *interval
= NULL
;
51 u8 genmask
= nft_genmask_cur(net
);
52 const struct rb_node
*parent
;
56 parent
= rcu_dereference_raw(priv
->root
.rb_node
);
57 while (parent
!= NULL
) {
58 if (read_seqcount_retry(&priv
->count
, seq
))
61 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
63 this = nft_set_ext_key(&rbe
->ext
);
64 d
= memcmp(this, key
, set
->klen
);
66 parent
= rcu_dereference_raw(parent
->rb_left
);
68 nft_rbtree_equal(set
, this, interval
) &&
69 nft_rbtree_interval_end(rbe
) &&
70 !nft_rbtree_interval_end(interval
))
74 parent
= rcu_dereference_raw(parent
->rb_right
);
76 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
77 parent
= rcu_dereference_raw(parent
->rb_left
);
80 if (nft_rbtree_interval_end(rbe
))
88 if (set
->flags
& NFT_SET_INTERVAL
&& interval
!= NULL
&&
89 nft_set_elem_active(&interval
->ext
, genmask
) &&
90 !nft_rbtree_interval_end(interval
)) {
91 *ext
= &interval
->ext
;
98 static bool nft_rbtree_lookup(const struct net
*net
, const struct nft_set
*set
,
99 const u32
*key
, const struct nft_set_ext
**ext
)
101 struct nft_rbtree
*priv
= nft_set_priv(set
);
102 unsigned int seq
= read_seqcount_begin(&priv
->count
);
105 ret
= __nft_rbtree_lookup(net
, set
, key
, ext
, seq
);
106 if (ret
|| !read_seqcount_retry(&priv
->count
, seq
))
109 read_lock_bh(&priv
->lock
);
110 seq
= read_seqcount_begin(&priv
->count
);
111 ret
= __nft_rbtree_lookup(net
, set
, key
, ext
, seq
);
112 read_unlock_bh(&priv
->lock
);
117 static bool __nft_rbtree_get(const struct net
*net
, const struct nft_set
*set
,
118 const u32
*key
, struct nft_rbtree_elem
**elem
,
119 unsigned int seq
, unsigned int flags
, u8 genmask
)
121 struct nft_rbtree_elem
*rbe
, *interval
= NULL
;
122 struct nft_rbtree
*priv
= nft_set_priv(set
);
123 const struct rb_node
*parent
;
127 parent
= rcu_dereference_raw(priv
->root
.rb_node
);
128 while (parent
!= NULL
) {
129 if (read_seqcount_retry(&priv
->count
, seq
))
132 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
134 this = nft_set_ext_key(&rbe
->ext
);
135 d
= memcmp(this, key
, set
->klen
);
137 parent
= rcu_dereference_raw(parent
->rb_left
);
140 parent
= rcu_dereference_raw(parent
->rb_right
);
142 if (!nft_set_elem_active(&rbe
->ext
, genmask
))
143 parent
= rcu_dereference_raw(parent
->rb_left
);
145 if (!nft_set_ext_exists(&rbe
->ext
, NFT_SET_EXT_FLAGS
) ||
146 (*nft_set_ext_flags(&rbe
->ext
) & NFT_SET_ELEM_INTERVAL_END
) ==
147 (flags
& NFT_SET_ELEM_INTERVAL_END
)) {
155 if (set
->flags
& NFT_SET_INTERVAL
&& interval
!= NULL
&&
156 nft_set_elem_active(&interval
->ext
, genmask
) &&
157 !nft_rbtree_interval_end(interval
)) {
165 static void *nft_rbtree_get(const struct net
*net
, const struct nft_set
*set
,
166 const struct nft_set_elem
*elem
, unsigned int flags
)
168 struct nft_rbtree
*priv
= nft_set_priv(set
);
169 unsigned int seq
= read_seqcount_begin(&priv
->count
);
170 struct nft_rbtree_elem
*rbe
= ERR_PTR(-ENOENT
);
171 const u32
*key
= (const u32
*)&elem
->key
.val
;
172 u8 genmask
= nft_genmask_cur(net
);
175 ret
= __nft_rbtree_get(net
, set
, key
, &rbe
, seq
, flags
, genmask
);
176 if (ret
|| !read_seqcount_retry(&priv
->count
, seq
))
179 read_lock_bh(&priv
->lock
);
180 seq
= read_seqcount_begin(&priv
->count
);
181 ret
= __nft_rbtree_get(net
, set
, key
, &rbe
, seq
, flags
, genmask
);
183 rbe
= ERR_PTR(-ENOENT
);
184 read_unlock_bh(&priv
->lock
);
189 static int __nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
190 struct nft_rbtree_elem
*new,
191 struct nft_set_ext
**ext
)
193 struct nft_rbtree
*priv
= nft_set_priv(set
);
194 u8 genmask
= nft_genmask_next(net
);
195 struct nft_rbtree_elem
*rbe
;
196 struct rb_node
*parent
, **p
;
200 p
= &priv
->root
.rb_node
;
203 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
204 d
= memcmp(nft_set_ext_key(&rbe
->ext
),
205 nft_set_ext_key(&new->ext
),
208 p
= &parent
->rb_left
;
210 p
= &parent
->rb_right
;
212 if (nft_rbtree_interval_end(rbe
) &&
213 !nft_rbtree_interval_end(new)) {
214 p
= &parent
->rb_left
;
215 } else if (!nft_rbtree_interval_end(rbe
) &&
216 nft_rbtree_interval_end(new)) {
217 p
= &parent
->rb_right
;
218 } else if (nft_set_elem_active(&rbe
->ext
, genmask
)) {
222 p
= &parent
->rb_left
;
226 rb_link_node_rcu(&new->node
, parent
, p
);
227 rb_insert_color(&new->node
, &priv
->root
);
231 static int nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
232 const struct nft_set_elem
*elem
,
233 struct nft_set_ext
**ext
)
235 struct nft_rbtree
*priv
= nft_set_priv(set
);
236 struct nft_rbtree_elem
*rbe
= elem
->priv
;
239 write_lock_bh(&priv
->lock
);
240 write_seqcount_begin(&priv
->count
);
241 err
= __nft_rbtree_insert(net
, set
, rbe
, ext
);
242 write_seqcount_end(&priv
->count
);
243 write_unlock_bh(&priv
->lock
);
248 static void nft_rbtree_remove(const struct net
*net
,
249 const struct nft_set
*set
,
250 const struct nft_set_elem
*elem
)
252 struct nft_rbtree
*priv
= nft_set_priv(set
);
253 struct nft_rbtree_elem
*rbe
= elem
->priv
;
255 write_lock_bh(&priv
->lock
);
256 write_seqcount_begin(&priv
->count
);
257 rb_erase(&rbe
->node
, &priv
->root
);
258 write_seqcount_end(&priv
->count
);
259 write_unlock_bh(&priv
->lock
);
262 static void nft_rbtree_activate(const struct net
*net
,
263 const struct nft_set
*set
,
264 const struct nft_set_elem
*elem
)
266 struct nft_rbtree_elem
*rbe
= elem
->priv
;
268 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
269 nft_set_elem_clear_busy(&rbe
->ext
);
272 static bool nft_rbtree_flush(const struct net
*net
,
273 const struct nft_set
*set
, void *priv
)
275 struct nft_rbtree_elem
*rbe
= priv
;
277 if (!nft_set_elem_mark_busy(&rbe
->ext
) ||
278 !nft_is_active(net
, &rbe
->ext
)) {
279 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
285 static void *nft_rbtree_deactivate(const struct net
*net
,
286 const struct nft_set
*set
,
287 const struct nft_set_elem
*elem
)
289 const struct nft_rbtree
*priv
= nft_set_priv(set
);
290 const struct rb_node
*parent
= priv
->root
.rb_node
;
291 struct nft_rbtree_elem
*rbe
, *this = elem
->priv
;
292 u8 genmask
= nft_genmask_next(net
);
295 while (parent
!= NULL
) {
296 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
298 d
= memcmp(nft_set_ext_key(&rbe
->ext
), &elem
->key
.val
,
301 parent
= parent
->rb_left
;
303 parent
= parent
->rb_right
;
305 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
306 parent
= parent
->rb_left
;
309 if (nft_rbtree_interval_end(rbe
) &&
310 !nft_rbtree_interval_end(this)) {
311 parent
= parent
->rb_left
;
313 } else if (!nft_rbtree_interval_end(rbe
) &&
314 nft_rbtree_interval_end(this)) {
315 parent
= parent
->rb_right
;
318 nft_rbtree_flush(net
, set
, rbe
);
325 static void nft_rbtree_walk(const struct nft_ctx
*ctx
,
327 struct nft_set_iter
*iter
)
329 struct nft_rbtree
*priv
= nft_set_priv(set
);
330 struct nft_rbtree_elem
*rbe
;
331 struct nft_set_elem elem
;
332 struct rb_node
*node
;
334 read_lock_bh(&priv
->lock
);
335 for (node
= rb_first(&priv
->root
); node
!= NULL
; node
= rb_next(node
)) {
336 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
338 if (iter
->count
< iter
->skip
)
340 if (!nft_set_elem_active(&rbe
->ext
, iter
->genmask
))
345 iter
->err
= iter
->fn(ctx
, set
, iter
, &elem
);
347 read_unlock_bh(&priv
->lock
);
353 read_unlock_bh(&priv
->lock
);
356 static void nft_rbtree_gc(struct work_struct
*work
)
358 struct nft_set_gc_batch
*gcb
= NULL
;
359 struct rb_node
*node
, *prev
= NULL
;
360 struct nft_rbtree_elem
*rbe
;
361 struct nft_rbtree
*priv
;
365 priv
= container_of(work
, struct nft_rbtree
, gc_work
.work
);
366 set
= nft_set_container_of(priv
);
368 write_lock_bh(&priv
->lock
);
369 write_seqcount_begin(&priv
->count
);
370 for (node
= rb_first(&priv
->root
); node
!= NULL
; node
= rb_next(node
)) {
371 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
373 if (nft_rbtree_interval_end(rbe
)) {
377 if (!nft_set_elem_expired(&rbe
->ext
))
379 if (nft_set_elem_mark_busy(&rbe
->ext
))
382 gcb
= nft_set_gc_batch_check(set
, gcb
, GFP_ATOMIC
);
386 atomic_dec(&set
->nelems
);
387 nft_set_gc_batch_add(gcb
, rbe
);
390 rbe
= rb_entry(prev
, struct nft_rbtree_elem
, node
);
391 atomic_dec(&set
->nelems
);
392 nft_set_gc_batch_add(gcb
, rbe
);
394 node
= rb_next(node
);
398 for (i
= 0; i
< gcb
->head
.cnt
; i
++) {
400 rb_erase(&rbe
->node
, &priv
->root
);
403 write_seqcount_end(&priv
->count
);
404 write_unlock_bh(&priv
->lock
);
406 nft_set_gc_batch_complete(gcb
);
408 queue_delayed_work(system_power_efficient_wq
, &priv
->gc_work
,
409 nft_set_gc_interval(set
));
412 static unsigned int nft_rbtree_privsize(const struct nlattr
* const nla
[],
413 const struct nft_set_desc
*desc
)
415 return sizeof(struct nft_rbtree
);
418 static int nft_rbtree_init(const struct nft_set
*set
,
419 const struct nft_set_desc
*desc
,
420 const struct nlattr
* const nla
[])
422 struct nft_rbtree
*priv
= nft_set_priv(set
);
424 rwlock_init(&priv
->lock
);
425 seqcount_init(&priv
->count
);
426 priv
->root
= RB_ROOT
;
428 INIT_DEFERRABLE_WORK(&priv
->gc_work
, nft_rbtree_gc
);
429 if (set
->flags
& NFT_SET_TIMEOUT
)
430 queue_delayed_work(system_power_efficient_wq
, &priv
->gc_work
,
431 nft_set_gc_interval(set
));
436 static void nft_rbtree_destroy(const struct nft_set
*set
)
438 struct nft_rbtree
*priv
= nft_set_priv(set
);
439 struct nft_rbtree_elem
*rbe
;
440 struct rb_node
*node
;
442 cancel_delayed_work_sync(&priv
->gc_work
);
443 while ((node
= priv
->root
.rb_node
) != NULL
) {
444 rb_erase(node
, &priv
->root
);
445 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
446 nft_set_elem_destroy(set
, rbe
, true);
450 static bool nft_rbtree_estimate(const struct nft_set_desc
*desc
, u32 features
,
451 struct nft_set_estimate
*est
)
454 est
->size
= sizeof(struct nft_rbtree
) +
455 desc
->size
* sizeof(struct nft_rbtree_elem
);
459 est
->lookup
= NFT_SET_CLASS_O_LOG_N
;
460 est
->space
= NFT_SET_CLASS_O_N
;
465 struct nft_set_type nft_set_rbtree_type __read_mostly
= {
466 .owner
= THIS_MODULE
,
467 .features
= NFT_SET_INTERVAL
| NFT_SET_MAP
| NFT_SET_OBJECT
| NFT_SET_TIMEOUT
,
469 .privsize
= nft_rbtree_privsize
,
470 .elemsize
= offsetof(struct nft_rbtree_elem
, ext
),
471 .estimate
= nft_rbtree_estimate
,
472 .init
= nft_rbtree_init
,
473 .destroy
= nft_rbtree_destroy
,
474 .insert
= nft_rbtree_insert
,
475 .remove
= nft_rbtree_remove
,
476 .deactivate
= nft_rbtree_deactivate
,
477 .flush
= nft_rbtree_flush
,
478 .activate
= nft_rbtree_activate
,
479 .lookup
= nft_rbtree_lookup
,
480 .walk
= nft_rbtree_walk
,
481 .get
= nft_rbtree_get
,