/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/list.h>
15 #include <linux/rbtree.h>
16 #include <linux/netlink.h>
17 #include <linux/netfilter.h>
18 #include <linux/netfilter/nf_tables.h>
19 #include <net/netfilter/nf_tables.h>
26 struct nft_rbtree_elem
{
28 struct nft_set_ext ext
;
31 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem
*rbe
)
33 return nft_set_ext_exists(&rbe
->ext
, NFT_SET_EXT_FLAGS
) &&
34 (*nft_set_ext_flags(&rbe
->ext
) & NFT_SET_ELEM_INTERVAL_END
);
37 static bool nft_rbtree_equal(const struct nft_set
*set
, const void *this,
38 const struct nft_rbtree_elem
*interval
)
40 return memcmp(this, nft_set_ext_key(&interval
->ext
), set
->klen
) == 0;
43 static bool nft_rbtree_lookup(const struct net
*net
, const struct nft_set
*set
,
44 const u32
*key
, const struct nft_set_ext
**ext
)
46 struct nft_rbtree
*priv
= nft_set_priv(set
);
47 const struct nft_rbtree_elem
*rbe
, *interval
= NULL
;
48 u8 genmask
= nft_genmask_cur(net
);
49 const struct rb_node
*parent
;
53 read_lock_bh(&priv
->lock
);
54 parent
= priv
->root
.rb_node
;
55 while (parent
!= NULL
) {
56 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
58 this = nft_set_ext_key(&rbe
->ext
);
59 d
= memcmp(this, key
, set
->klen
);
61 parent
= parent
->rb_left
;
63 nft_rbtree_equal(set
, this, interval
) &&
64 nft_rbtree_interval_end(this) &&
65 !nft_rbtree_interval_end(interval
))
69 parent
= parent
->rb_right
;
71 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
72 parent
= parent
->rb_left
;
75 if (nft_rbtree_interval_end(rbe
))
77 read_unlock_bh(&priv
->lock
);
84 if (set
->flags
& NFT_SET_INTERVAL
&& interval
!= NULL
&&
85 nft_set_elem_active(&interval
->ext
, genmask
) &&
86 !nft_rbtree_interval_end(interval
)) {
87 read_unlock_bh(&priv
->lock
);
88 *ext
= &interval
->ext
;
92 read_unlock_bh(&priv
->lock
);
96 static int __nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
97 struct nft_rbtree_elem
*new,
98 struct nft_set_ext
**ext
)
100 struct nft_rbtree
*priv
= nft_set_priv(set
);
101 u8 genmask
= nft_genmask_next(net
);
102 struct nft_rbtree_elem
*rbe
;
103 struct rb_node
*parent
, **p
;
107 p
= &priv
->root
.rb_node
;
110 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
111 d
= memcmp(nft_set_ext_key(&rbe
->ext
),
112 nft_set_ext_key(&new->ext
),
115 p
= &parent
->rb_left
;
117 p
= &parent
->rb_right
;
119 if (nft_set_elem_active(&rbe
->ext
, genmask
)) {
120 if (nft_rbtree_interval_end(rbe
) &&
121 !nft_rbtree_interval_end(new))
122 p
= &parent
->rb_left
;
123 else if (!nft_rbtree_interval_end(rbe
) &&
124 nft_rbtree_interval_end(new))
125 p
= &parent
->rb_right
;
133 rb_link_node(&new->node
, parent
, p
);
134 rb_insert_color(&new->node
, &priv
->root
);
138 static int nft_rbtree_insert(const struct net
*net
, const struct nft_set
*set
,
139 const struct nft_set_elem
*elem
,
140 struct nft_set_ext
**ext
)
142 struct nft_rbtree
*priv
= nft_set_priv(set
);
143 struct nft_rbtree_elem
*rbe
= elem
->priv
;
146 write_lock_bh(&priv
->lock
);
147 err
= __nft_rbtree_insert(net
, set
, rbe
, ext
);
148 write_unlock_bh(&priv
->lock
);
153 static void nft_rbtree_remove(const struct net
*net
,
154 const struct nft_set
*set
,
155 const struct nft_set_elem
*elem
)
157 struct nft_rbtree
*priv
= nft_set_priv(set
);
158 struct nft_rbtree_elem
*rbe
= elem
->priv
;
160 write_lock_bh(&priv
->lock
);
161 rb_erase(&rbe
->node
, &priv
->root
);
162 write_unlock_bh(&priv
->lock
);
165 static void nft_rbtree_activate(const struct net
*net
,
166 const struct nft_set
*set
,
167 const struct nft_set_elem
*elem
)
169 struct nft_rbtree_elem
*rbe
= elem
->priv
;
171 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
174 static bool nft_rbtree_flush(const struct net
*net
,
175 const struct nft_set
*set
, void *priv
)
177 struct nft_rbtree_elem
*rbe
= priv
;
179 nft_set_elem_change_active(net
, set
, &rbe
->ext
);
183 static void *nft_rbtree_deactivate(const struct net
*net
,
184 const struct nft_set
*set
,
185 const struct nft_set_elem
*elem
)
187 const struct nft_rbtree
*priv
= nft_set_priv(set
);
188 const struct rb_node
*parent
= priv
->root
.rb_node
;
189 struct nft_rbtree_elem
*rbe
, *this = elem
->priv
;
190 u8 genmask
= nft_genmask_next(net
);
193 while (parent
!= NULL
) {
194 rbe
= rb_entry(parent
, struct nft_rbtree_elem
, node
);
196 d
= memcmp(nft_set_ext_key(&rbe
->ext
), &elem
->key
.val
,
199 parent
= parent
->rb_left
;
201 parent
= parent
->rb_right
;
203 if (!nft_set_elem_active(&rbe
->ext
, genmask
)) {
204 parent
= parent
->rb_left
;
207 if (nft_rbtree_interval_end(rbe
) &&
208 !nft_rbtree_interval_end(this)) {
209 parent
= parent
->rb_left
;
211 } else if (!nft_rbtree_interval_end(rbe
) &&
212 nft_rbtree_interval_end(this)) {
213 parent
= parent
->rb_right
;
216 nft_rbtree_flush(net
, set
, rbe
);
223 static void nft_rbtree_walk(const struct nft_ctx
*ctx
,
225 struct nft_set_iter
*iter
)
227 struct nft_rbtree
*priv
= nft_set_priv(set
);
228 struct nft_rbtree_elem
*rbe
;
229 struct nft_set_elem elem
;
230 struct rb_node
*node
;
232 read_lock_bh(&priv
->lock
);
233 for (node
= rb_first(&priv
->root
); node
!= NULL
; node
= rb_next(node
)) {
234 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
236 if (iter
->count
< iter
->skip
)
238 if (!nft_set_elem_active(&rbe
->ext
, iter
->genmask
))
243 iter
->err
= iter
->fn(ctx
, set
, iter
, &elem
);
245 read_unlock_bh(&priv
->lock
);
251 read_unlock_bh(&priv
->lock
);
254 static unsigned int nft_rbtree_privsize(const struct nlattr
* const nla
[])
256 return sizeof(struct nft_rbtree
);
259 static int nft_rbtree_init(const struct nft_set
*set
,
260 const struct nft_set_desc
*desc
,
261 const struct nlattr
* const nla
[])
263 struct nft_rbtree
*priv
= nft_set_priv(set
);
265 rwlock_init(&priv
->lock
);
266 priv
->root
= RB_ROOT
;
270 static void nft_rbtree_destroy(const struct nft_set
*set
)
272 struct nft_rbtree
*priv
= nft_set_priv(set
);
273 struct nft_rbtree_elem
*rbe
;
274 struct rb_node
*node
;
276 while ((node
= priv
->root
.rb_node
) != NULL
) {
277 rb_erase(node
, &priv
->root
);
278 rbe
= rb_entry(node
, struct nft_rbtree_elem
, node
);
279 nft_set_elem_destroy(set
, rbe
, true);
283 static bool nft_rbtree_estimate(const struct nft_set_desc
*desc
, u32 features
,
284 struct nft_set_estimate
*est
)
288 nsize
= sizeof(struct nft_rbtree_elem
);
290 est
->size
= sizeof(struct nft_rbtree
) + desc
->size
* nsize
;
294 est
->lookup
= NFT_SET_CLASS_O_LOG_N
;
295 est
->space
= NFT_SET_CLASS_O_N
;
300 static struct nft_set_ops nft_rbtree_ops __read_mostly
= {
301 .privsize
= nft_rbtree_privsize
,
302 .elemsize
= offsetof(struct nft_rbtree_elem
, ext
),
303 .estimate
= nft_rbtree_estimate
,
304 .init
= nft_rbtree_init
,
305 .destroy
= nft_rbtree_destroy
,
306 .insert
= nft_rbtree_insert
,
307 .remove
= nft_rbtree_remove
,
308 .deactivate
= nft_rbtree_deactivate
,
309 .flush
= nft_rbtree_flush
,
310 .activate
= nft_rbtree_activate
,
311 .lookup
= nft_rbtree_lookup
,
312 .walk
= nft_rbtree_walk
,
313 .features
= NFT_SET_INTERVAL
| NFT_SET_MAP
| NFT_SET_OBJECT
,
314 .owner
= THIS_MODULE
,
317 static int __init
nft_rbtree_module_init(void)
319 return nft_register_set(&nft_rbtree_ops
);
322 static void __exit
nft_rbtree_module_exit(void)
324 nft_unregister_set(&nft_rbtree_ops
);
327 module_init(nft_rbtree_module_init
);
328 module_exit(nft_rbtree_module_exit
);
330 MODULE_LICENSE("GPL");
331 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
332 MODULE_ALIAS_NFT_SET();