net/netfilter/nft_rbtree.c
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>

static DEFINE_SPINLOCK(nft_rbtree_lock);

struct nft_rbtree {
        struct rb_root          root;
};

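/* Each set element embeds its rb-tree node plus the variable sized
 * nf_tables set extension area (key, flags, ...).
 */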
struct nft_rbtree_elem {
        struct rb_node          node;
        struct nft_set_ext      ext;
};

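/* True if the element carries the NFT_SET_ELEM_INTERVAL_END flag,
 * i.e. it marks the end of an interval rather than its start.
 */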
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
        return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
                             const struct nft_rbtree_elem *interval)
{
        return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

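/* Look up @key under nft_rbtree_lock. For interval sets, the last element
 * visited whose key is smaller than @key is remembered as interval start
 * candidate and used as match when no exact match exists. Elements that are
 * not active in the current generation and interval end markers never match.
 */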
static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key,
                              const struct nft_set_ext **ext)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        const struct rb_node *parent;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        const void *this;
        int d;

        spin_lock_bh(&nft_rbtree_lock);
        parent = priv->root.rb_node;
        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                this = nft_set_ext_key(&rbe->ext);
                d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = parent->rb_left;
                        /* In case of adjacent ranges, we always see the high
                         * part of the range first, before the low one.
                         * So don't update interval if the keys are equal.
                         */
                        if (interval && nft_rbtree_equal(set, this, interval))
                                continue;
                        interval = rbe;
                } else if (d > 0)
                        parent = parent->rb_right;
                else {
found:
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
                        }
                        if (nft_rbtree_interval_end(rbe))
                                goto out;
                        spin_unlock_bh(&nft_rbtree_lock);

                        *ext = &rbe->ext;
                        return true;
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
                rbe = interval;
                goto found;
        }
out:
        spin_unlock_bh(&nft_rbtree_lock);
        return false;
}

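/* Insert @new, ordered by memcmp() on the key. An element with an equal key
 * that is active in the next generation is only tolerated if exactly one of
 * the two elements is an interval end marker; otherwise -EEXIST is returned.
 * Called with nft_rbtree_lock held.
 */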
static int __nft_rbtree_insert(const struct nft_set *set,
                               struct nft_rbtree_elem *new)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *parent, **p;
        u8 genmask = nft_genmask_next(read_pnet(&set->pnet));
        int d;

        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = memcmp(nft_set_ext_key(&rbe->ext),
                           nft_set_ext_key(&new->ext),
                           set->klen);
                if (d < 0)
                        p = &parent->rb_left;
                else if (d > 0)
                        p = &parent->rb_right;
                else {
                        if (nft_set_elem_active(&rbe->ext, genmask)) {
                                if (nft_rbtree_interval_end(rbe) &&
                                    !nft_rbtree_interval_end(new))
                                        p = &parent->rb_left;
                                else if (!nft_rbtree_interval_end(rbe) &&
                                         nft_rbtree_interval_end(new))
                                        p = &parent->rb_right;
                                else
                                        return -EEXIST;
                        }
                }
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
}

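/* Take nft_rbtree_lock to serialize the insertion against lookups, walks
 * and removals.
 */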
static int nft_rbtree_insert(const struct nft_set *set,
                             const struct nft_set_elem *elem)
{
        struct nft_rbtree_elem *rbe = elem->priv;
        int err;

        spin_lock_bh(&nft_rbtree_lock);
        err = __nft_rbtree_insert(set, rbe);
        spin_unlock_bh(&nft_rbtree_lock);

        return err;
}

static void nft_rbtree_remove(const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;

        spin_lock_bh(&nft_rbtree_lock);
        rb_erase(&rbe->node, &priv->root);
        spin_unlock_bh(&nft_rbtree_lock);
}

static void nft_rbtree_activate(const struct nft_set *set,
                                const struct nft_set_elem *elem)
{
        struct nft_rbtree_elem *rbe = elem->priv;

        nft_set_elem_change_active(set, &rbe->ext);
}

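/* Find the element matching @elem, distinguishing interval start from end
 * markers, and toggle its generation mask so that it is no longer active.
 * Returns the element on success, NULL if no matching active element exists.
 */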
static void *nft_rbtree_deactivate(const struct nft_set *set,
                                   const struct nft_set_elem *elem)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        struct nft_rbtree_elem *rbe, *this = elem->priv;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int d;

        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
                           set->klen);
                if (d < 0)
                        parent = parent->rb_left;
                else if (d > 0)
                        parent = parent->rb_right;
                else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
                        }
                        if (nft_rbtree_interval_end(rbe) &&
                            !nft_rbtree_interval_end(this)) {
                                parent = parent->rb_left;
                                continue;
                        } else if (!nft_rbtree_interval_end(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
                        }
                        nft_set_elem_change_active(set, &rbe->ext);
                        return rbe;
                }
        }
        return NULL;
}

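/* Walk all elements that are active in the current generation in tree order,
 * honouring iter->skip and stopping early if the callback reports an error.
 */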
static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            const struct nft_set *set,
                            struct nft_set_iter *iter)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;
        u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));

        spin_lock_bh(&nft_rbtree_lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (iter->count < iter->skip)
                        goto cont;
                if (!nft_set_elem_active(&rbe->ext, genmask))
                        goto cont;

                elem.priv = rbe;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
                        spin_unlock_bh(&nft_rbtree_lock);
                        return;
                }
cont:
                iter->count++;
        }
        spin_unlock_bh(&nft_rbtree_lock);
}

static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
{
        return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
                           const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
{
        struct nft_rbtree *priv = nft_set_priv(set);

        priv->root = RB_ROOT;
        return 0;
}

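/* Release all remaining elements when the set itself is destroyed. */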
static void nft_rbtree_destroy(const struct nft_set *set)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *node;

        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
                nft_set_elem_destroy(set, rbe);
        }
}

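/* Report size and performance estimates so that nf_tables can choose between
 * the available set backends: memory grows linearly with the number of
 * elements, lookups cost O(log n).
 */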
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        unsigned int nsize;

        nsize = sizeof(struct nft_rbtree_elem);
        if (desc->size)
                est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
        else
                est->size = nsize;

        est->class = NFT_SET_CLASS_O_LOG_N;

        return true;
}

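/* rbtree set type; advertises support for interval sets and maps. */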
static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .privsize       = nft_rbtree_privsize,
        .elemsize       = offsetof(struct nft_rbtree_elem, ext),
        .estimate       = nft_rbtree_estimate,
        .init           = nft_rbtree_init,
        .destroy        = nft_rbtree_destroy,
        .insert         = nft_rbtree_insert,
        .remove         = nft_rbtree_remove,
        .deactivate     = nft_rbtree_deactivate,
        .activate       = nft_rbtree_activate,
        .lookup         = nft_rbtree_lookup,
        .walk           = nft_rbtree_walk,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP,
        .owner          = THIS_MODULE,
};

static int __init nft_rbtree_module_init(void)
{
        return nft_register_set(&nft_rbtree_ops);
}

static void __exit nft_rbtree_module_exit(void)
{
        nft_unregister_set(&nft_rbtree_ops);
}

module_init(nft_rbtree_module_init);
module_exit(nft_rbtree_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_SET();