/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
27 struct nft_rbtree_elem
{
29 struct nft_set_ext ext
;
32 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem
*rbe
)
34 return nft_set_ext_exists(&rbe
->ext
, NFT_SET_EXT_FLAGS
) &&
35 (*nft_set_ext_flags(&rbe
->ext
) & NFT_SET_ELEM_INTERVAL_END
);
38 static bool nft_rbtree_equal(const struct nft_set
*set
, const void *this,
39 const struct nft_rbtree_elem
*interval
)
41 return memcmp(this, nft_set_ext_key(&interval
->ext
), set
->klen
) == 0;
44 static bool __nft_rbtree_lookup(const struct net
*net
, const struct nft_set
*set
,
45 const u32
*key
, const struct nft_set_ext
**ext
,
48 struct nft_rbtree
*priv
= nft_set_priv(set
);
49 const struct nft_rbtree_elem
*rbe
, *interval
= NULL
;
50 u8 genmask
= nft_genmask_cur(net
);
51 const struct rb_node
*parent
;
55 parent
= rcu_dereference_raw(priv
->root
.rb_node
);
56 while (parent
!= NULL
) {
57 if (read_seqcount_retry(&priv
->count
, seq
))
60 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
62 this = nft_set_ext_key(&rbe
->ext
);
63 d
= memcmp(this, key
, set
->klen
);
65 parent
= rcu_dereference_raw(parent
->rb_left
);
67 nft_rbtree_equal(set
, this, interval
) &&
68 nft_rbtree_interval_end(this) &&
69 !nft_rbtree_interval_end(interval
))
73 parent
= rcu_dereference_raw(parent
->rb_right
);
75 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
76 parent
= rcu_dereference_raw(parent
->rb_left
);
79 if (nft_rbtree_interval_end(rbe
))
87 if (set
->flags
& NFT_SET_INTERVAL
&& interval
!= NULL
&&
88 nft_set_elem_active(&interval
->ext
, genmask
) &&
89 !nft_rbtree_interval_end(interval
)) {
90 *ext
= &interval
->ext
;
97 static bool nft_rbtree_lookup(const struct net
*net
, const struct nft_set
*set
,
98 const u32
*key
, const struct nft_set_ext
**ext
)
100 struct nft_rbtree
*priv
= nft_set_priv(set
);
101 unsigned int seq
= read_seqcount_begin(&priv
->count
);
104 ret
= __nft_rbtree_lookup(net
, set
, key
, ext
, seq
);
105 if (ret
|| !read_seqcount_retry(&priv
->count
, seq
))
108 read_lock_bh(&priv
->lock
);
109 seq
= read_seqcount_begin(&priv
->count
);
110 ret
= __nft_rbtree_lookup(net
, set
, key
, ext
, seq
);
111 read_unlock_bh(&priv
->lock
);
116 static bool __nft_rbtree_get(const struct net
*net
, const struct nft_set
*set
,
117 const u32
*key
, struct nft_rbtree_elem
**elem
,
118 unsigned int seq
, unsigned int flags
, u8 genmask
)
120 struct nft_rbtree_elem
*rbe
, *interval
= NULL
;
121 struct nft_rbtree
*priv
= nft_set_priv(set
);
122 const struct rb_node
*parent
;
126 parent
= rcu_dereference_raw(priv
->root
.rb_node
);
127 while (parent
!= NULL
) {
128 if (read_seqcount_retry(&priv
->count
, seq
))
131 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
133 this = nft_set_ext_key(&rbe
->ext
);
134 d
= memcmp(this, key
, set
->klen
);
136 parent
= rcu_dereference_raw(parent
->rb_left
);
139 parent
= rcu_dereference_raw(parent
->rb_right
);
141 if (!nft_set_elem_active(&rbe
->ext
, genmask
))
142 parent
= rcu_dereference_raw(parent
->rb_left
);
144 if (!nft_set_ext_exists(&rbe
->ext
, NFT_SET_EXT_FLAGS
) ||
145 (*nft_set_ext_flags(&rbe
->ext
) & NFT_SET_ELEM_INTERVAL_END
) ==
146 (flags
& NFT_SET_ELEM_INTERVAL_END
)) {
154 if (set
->flags
& NFT_SET_INTERVAL
&& interval
!= NULL
&&
155 nft_set_elem_active(&interval
->ext
, genmask
) &&
156 !nft_rbtree_interval_end(interval
)) {
164 static void *nft_rbtree_get(const struct net
*net
, const struct nft_set
*set
,
165 const struct nft_set_elem
*elem
, unsigned int flags
)
167 struct nft_rbtree
*priv
= nft_set_priv(set
);
168 unsigned int seq
= read_seqcount_begin(&priv
->count
);
169 struct nft_rbtree_elem
*rbe
= ERR_PTR(-ENOENT
);
170 const u32
*key
= (const u32
*)&elem
->key
.val
;
171 u8 genmask
= nft_genmask_cur(net
);
174 ret
= __nft_rbtree_get(net
, set
, key
, &rbe
, seq
, flags
, genmask
);
175 if (ret
|| !read_seqcount_retry(&priv
->count
, seq
))
178 read_lock_bh(&priv
->lock
);
179 seq
= read_seqcount_begin(&priv
->count
);
180 ret
= __nft_rbtree_get(net
, set
, key
, &rbe
, seq
, flags
, genmask
);
182 rbe
= ERR_PTR(-ENOENT
);
183 read_unlock_bh(&priv
->lock
);
188 static int __nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
189 struct nft_rbtree_elem
*new,
190 struct nft_set_ext
**ext
)
192 struct nft_rbtree
*priv
= nft_set_priv(set
);
193 u8 genmask
= nft_genmask_next(net
);
194 struct nft_rbtree_elem
*rbe
;
195 struct rb_node
*parent
, **p
;
199 p
= &priv
->root
.rb_node
;
202 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
203 d
= memcmp(nft_set_ext_key(&rbe
->ext
),
204 nft_set_ext_key(&new->ext
),
207 p
= &parent
->rb_left
;
209 p
= &parent
->rb_right
;
211 if (nft_rbtree_interval_end(rbe
) &&
212 !nft_rbtree_interval_end(new)) {
213 p
= &parent
->rb_left
;
214 } else if (!nft_rbtree_interval_end(rbe
) &&
215 nft_rbtree_interval_end(new)) {
216 p
= &parent
->rb_right
;
217 } else if (nft_set_elem_active(&rbe
->ext
, genmask
)) {
221 p
= &parent
->rb_left
;
225 rb_link_node_rcu(&new->node
, parent
, p
);
226 rb_insert_color(&new->node
, &priv
->root
);
230 static int nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
231 const struct nft_set_elem
*elem
,
232 struct nft_set_ext
**ext
)
234 struct nft_rbtree
*priv
= nft_set_priv(set
);
235 struct nft_rbtree_elem
*rbe
= elem
->priv
;
238 write_lock_bh(&priv
->lock
);
239 write_seqcount_begin(&priv
->count
);
240 err
= __nft_rbtree_insert(net
, set
, rbe
, ext
);
241 write_seqcount_end(&priv
->count
);
242 write_unlock_bh(&priv
->lock
);
247 static void nft_rbtree_remove(const struct net
*net
,
248 const struct nft_set
*set
,
249 const struct nft_set_elem
*elem
)
251 struct nft_rbtree
*priv
= nft_set_priv(set
);
252 struct nft_rbtree_elem
*rbe
= elem
->priv
;
254 write_lock_bh(&priv
->lock
);
255 write_seqcount_begin(&priv
->count
);
256 rb_erase(&rbe
->node
, &priv
->root
);
257 write_seqcount_end(&priv
->count
);
258 write_unlock_bh(&priv
->lock
);
261 static void nft_rbtree_activate(const struct net
*net
,
262 const struct nft_set
*set
,
263 const struct nft_set_elem
*elem
)
265 struct nft_rbtree_elem
*rbe
= elem
->priv
;
267 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
270 static bool nft_rbtree_flush(const struct net
*net
,
271 const struct nft_set
*set
, void *priv
)
273 struct nft_rbtree_elem
*rbe
= priv
;
275 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
279 static void *nft_rbtree_deactivate(const struct net
*net
,
280 const struct nft_set
*set
,
281 const struct nft_set_elem
*elem
)
283 const struct nft_rbtree
*priv
= nft_set_priv(set
);
284 const struct rb_node
*parent
= priv
->root
.rb_node
;
285 struct nft_rbtree_elem
*rbe
, *this = elem
->priv
;
286 u8 genmask
= nft_genmask_next(net
);
289 while (parent
!= NULL
) {
290 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
292 d
= memcmp(nft_set_ext_key(&rbe
->ext
), &elem
->key
.val
,
295 parent
= parent
->rb_left
;
297 parent
= parent
->rb_right
;
299 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
300 parent
= parent
->rb_left
;
303 if (nft_rbtree_interval_end(rbe
) &&
304 !nft_rbtree_interval_end(this)) {
305 parent
= parent
->rb_left
;
307 } else if (!nft_rbtree_interval_end(rbe
) &&
308 nft_rbtree_interval_end(this)) {
309 parent
= parent
->rb_right
;
312 nft_rbtree_flush(net
, set
, rbe
);
319 static void nft_rbtree_walk(const struct nft_ctx
*ctx
,
321 struct nft_set_iter
*iter
)
323 struct nft_rbtree
*priv
= nft_set_priv(set
);
324 struct nft_rbtree_elem
*rbe
;
325 struct nft_set_elem elem
;
326 struct rb_node
*node
;
328 read_lock_bh(&priv
->lock
);
329 for (node
= rb_first(&priv
->root
); node
!= NULL
; node
= rb_next(node
)) {
330 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
332 if (iter
->count
< iter
->skip
)
334 if (!nft_set_elem_active(&rbe
->ext
, iter
->genmask
))
339 iter
->err
= iter
->fn(ctx
, set
, iter
, &elem
);
341 read_unlock_bh(&priv
->lock
);
347 read_unlock_bh(&priv
->lock
);
350 static unsigned int nft_rbtree_privsize(const struct nlattr
* const nla
[],
351 const struct nft_set_desc
*desc
)
353 return sizeof(struct nft_rbtree
);
356 static int nft_rbtree_init(const struct nft_set
*set
,
357 const struct nft_set_desc
*desc
,
358 const struct nlattr
* const nla
[])
360 struct nft_rbtree
*priv
= nft_set_priv(set
);
362 rwlock_init(&priv
->lock
);
363 seqcount_init(&priv
->count
);
364 priv
->root
= RB_ROOT
;
368 static void nft_rbtree_destroy(const struct nft_set
*set
)
370 struct nft_rbtree
*priv
= nft_set_priv(set
);
371 struct nft_rbtree_elem
*rbe
;
372 struct rb_node
*node
;
374 while ((node
= priv
->root
.rb_node
) != NULL
) {
375 rb_erase(node
, &priv
->root
);
376 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
377 nft_set_elem_destroy(set
, rbe
, true);
381 static bool nft_rbtree_estimate(const struct nft_set_desc
*desc
, u32 features
,
382 struct nft_set_estimate
*est
)
385 est
->size
= sizeof(struct nft_rbtree
) +
386 desc
->size
* sizeof(struct nft_rbtree_elem
);
390 est
->lookup
= NFT_SET_CLASS_O_LOG_N
;
391 est
->space
= NFT_SET_CLASS_O_N
;
396 static struct nft_set_type nft_rbtree_type
;
397 static struct nft_set_ops nft_rbtree_ops __read_mostly
= {
398 .type
= &nft_rbtree_type
,
399 .privsize
= nft_rbtree_privsize
,
400 .elemsize
= offsetof(struct nft_rbtree_elem
, ext
),
401 .estimate
= nft_rbtree_estimate
,
402 .init
= nft_rbtree_init
,
403 .destroy
= nft_rbtree_destroy
,
404 .insert
= nft_rbtree_insert
,
405 .remove
= nft_rbtree_remove
,
406 .deactivate
= nft_rbtree_deactivate
,
407 .flush
= nft_rbtree_flush
,
408 .activate
= nft_rbtree_activate
,
409 .lookup
= nft_rbtree_lookup
,
410 .walk
= nft_rbtree_walk
,
411 .get
= nft_rbtree_get
,
412 .features
= NFT_SET_INTERVAL
| NFT_SET_MAP
| NFT_SET_OBJECT
,
415 static struct nft_set_type nft_rbtree_type __read_mostly
= {
416 .ops
= &nft_rbtree_ops
,
417 .owner
= THIS_MODULE
,
420 static int __init
nft_rbtree_module_init(void)
422 return nft_register_set(&nft_rbtree_type
);
425 static void __exit
nft_rbtree_module_exit(void)
427 nft_unregister_set(&nft_rbtree_type
);
430 module_init(nft_rbtree_module_init
);
431 module_exit(nft_rbtree_module_exit
);
433 MODULE_LICENSE("GPL");
434 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
435 MODULE_ALIAS_NFT_SET();