/*
 * net/netfilter/nft_set_rbtree.c
 *
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>

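/*
 * This backend stores set elements in a red-black tree keyed by memcmp()
 * order. Writers (insert, remove, garbage collection) serialize on
 * priv->lock and wrap their updates in a seqcount write section; the data
 * path walks the tree locklessly under RCU and uses the seqcount to detect
 * a concurrent writer, falling back to the read lock only when a retry is
 * signalled.
 */
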
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_t		count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

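/*
 * Interval sets are represented as element pairs: one element carries the
 * start key, and an element flagged NFT_SET_ELEM_INTERVAL_END closes the
 * range. As a rough illustration (the encoding is done by userspace and
 * the nf_tables core, not here): a range like 10.0.0.0-10.0.0.100 becomes
 * a start element keyed 10.0.0.0 plus an end-flagged element keyed just
 * past the range. A lookup that lands between a start element and its end
 * element matches the interval.
 */
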
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* With adjacent intervals, an end element shares its
			 * key with the next interval's start element; don't
			 * let the end shadow a start already recorded.
			 */
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(interval))
				continue;
			interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (nft_rbtree_interval_end(rbe))
				goto out;

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_interval_end(interval)) {
		*ext = &interval->ext;
		return true;
	}
out:
	return false;
}

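/*
 * The data-path lookup runs locklessly first; only if the seqcount shows
 * that a writer ran concurrently (and the lookup missed) is it retried
 * under the read lock. The pattern, roughly:
 *
 *	seq = read_seqcount_begin(&priv->count);
 *	if (__lookup(..., seq))
 *		return true;	// a hit is safe even when racing a writer
 *	if (!read_seqcount_retry(&priv->count, seq))
 *		return false;	// clean miss: no writer interfered
 *	// otherwise redo the walk under priv->lock
 */
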
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			/* Inactive in this generation: keep descending
			 * instead of evaluating this element, as in
			 * __nft_rbtree_lookup().
			 */
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}
			return false;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_interval_end(interval)) {
		*elem = interval;
		return true;
	}

	return false;
}

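/*
 * Control-plane counterpart of the lookup: this serves the netlink
 * "get element" path, so it returns the element itself (or an ERR_PTR)
 * rather than just its extension, and it honours NFT_SET_ELEM_INTERVAL_END
 * in @flags so either half of an interval pair can be fetched.
 */
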
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else {
			/* Equal keys: start and end elements of adjacent
			 * intervals may share a key, so order them
			 * deterministically; only a live element of the
			 * same kind is a genuine duplicate.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(new)) {
				p = &parent->rb_left;
			} else if (!nft_rbtree_interval_end(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;
			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}
	}
	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

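/*
 * Insertion is driven by the nf_tables transaction machinery, where
 * writers are already serialized at the netlink layer; the lock/seqcount
 * dance below exists to keep concurrent data-path lookups coherent. On a
 * clash, @ext is pointed at the existing element so the core can report
 * which entry caused the -EEXIST.
 */
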
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

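/*
 * nf_tables commits changes in two phases: elements carry per-generation
 * active bits, so deactivate() only marks an element dead in the *next*
 * generation (keeping transaction abort cheap), activate() makes a pending
 * insertion visible, and flush() is the batched deactivation used when the
 * whole set is flushed.
 */
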
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0) {
			parent = parent->rb_left;
		} else if (d > 0) {
			parent = parent->rb_right;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			if (nft_rbtree_interval_end(rbe) &&
			    !nft_rbtree_interval_end(this)) {
				parent = parent->rb_left;
				continue;
			} else if (!nft_rbtree_interval_end(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

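/*
 * Garbage collection for sets with timeouts: a deferrable work item walks
 * the tree under the writer lock and batches up expired elements. Since
 * keys are kept in reverse memcmp order, the walk visits an interval's end
 * element before its start, so the most recent end node is remembered in
 * @prev and collected together with its expired start element.
 */
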
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_set_gc_batch *gcb = NULL;
	struct rb_node *node, *prev = NULL;
	struct nft_rbtree_elem *rbe;
	struct nft_rbtree *priv;
	struct nft_set *set;
	int i;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			prev = node;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			goto out;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);

		if (prev) {
			rbe = rb_entry(prev, struct nft_rbtree_elem, node);
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe);
			/* Don't queue the same end element twice. */
			prev = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;	/* don't let the loop call rb_next(NULL) */
	}
out:
	if (gcb) {
		for (i = 0; i < gcb->head.cnt; i++) {
			rbe = gcb->elems[i];
			rb_erase(&rbe->node, &priv->root);
		}
	}
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[],
					const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_init(&priv->count);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

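/*
 * Size/performance estimate consulted by the core when picking a backend:
 * rbtree trades O(log n) lookups and O(n) memory for interval support, so
 * it tends to be selected when the set requires NFT_SET_INTERVAL rather
 * than raw hash-table speed.
 */
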
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space = NFT_SET_CLASS_O_N;

	return true;
}

static struct nft_set_type nft_rbtree_type __read_mostly = {
	.owner		= THIS_MODULE,
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};

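/*
 * For reference, userspace reaches this backend through ordinary nft(8)
 * interval sets; illustrative syntax only, not part of this file:
 *
 *	nft add set inet filter blocklist \
 *		'{ type ipv4_addr; flags interval; }'
 *	nft add element inet filter blocklist { 192.0.2.0/24 }
 */
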
static int __init nft_rbtree_module_init(void)
{
	return nft_register_set(&nft_rbtree_type);
}

static void __exit nft_rbtree_module_exit(void)
{
	nft_unregister_set(&nft_rbtree_type);
}

module_init(nft_rbtree_module_init);
module_exit(nft_rbtree_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_SET();