]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/netfilter/nft_set_rbtree.c
netfilter: nftables: add catch-all set element support
[mirror_ubuntu-jammy-kernel.git] / net / netfilter / nft_set_rbtree.c
CommitLineData
d2912cb1 1// SPDX-License-Identifier: GPL-2.0-only
20a69341
PM
2/*
3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4 *
20a69341
PM
5 * Development of this code funded by Astaro AG (http://www.astaro.com/)
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/rbtree.h>
13#include <linux/netlink.h>
14#include <linux/netfilter.h>
15#include <linux/netfilter/nf_tables.h>
5785cf15 16#include <net/netfilter/nf_tables_core.h>
20a69341
PM
17
/* Per-set private data: an rbtree of interval elements.
 * Writers take @lock and bump @count (a seqcount tied to the rwlock);
 * readers may walk the tree locklessly and retry on seqcount changes,
 * falling back to the read lock. @gc_work periodically reaps expired
 * elements when the set has NFT_SET_TIMEOUT.
 */
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};
24
/* One set element: the rbtree linkage plus the variable-size nftables
 * extension area (key, flags, timeout, ...). @ext must stay last —
 * the backend's .elemsize is offsetof(struct nft_rbtree_elem, ext).
 */
struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};
29
ef1d20e0
PNA
30static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
31{
32 return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
33 (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
34}
cc02e457 35
/* True if @rbe opens an interval: anything that is not an end element. */
static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}
40
e701001e
PNA
41static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
42 const struct nft_rbtree_elem *interval)
43{
44 return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
45}
46
/* Lockless lookup of @key in the tree, valid for seqcount snapshot @seq.
 * Returns true and sets *ext on a match; returns false on miss OR when a
 * concurrent writer invalidated the walk (caller re-checks the seqcount
 * and may retry under the read lock). @interval tracks the closest start
 * element to the left of @key, used to match interval sets.
 */
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		/* Bail out early if a writer raced us; result is unusable. */
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* Don't let an end element with the same key as the
			 * candidate start displace it as closest-left match.
			 */
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			/* Exact key match: skip elements not active in the
			 * current generation, keep descending left.
			 */
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				/* An exact match on an end element means a
				 * miss for anonymous sets; otherwise restart
				 * the interval tracking to the left.
				 */
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	/* No exact match: an active, unexpired start element to the left
	 * matches @key for interval sets.
	 */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}
109
/* Datapath lookup entry point: first try a lockless walk under a
 * seqcount snapshot; if a writer raced us and we found nothing, retry
 * once under the read lock, which excludes writers.
 */
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}
128
/* Control-plane element fetch, same walk discipline as __nft_rbtree_lookup
 * (valid only for seqcount snapshot @seq). @flags selects whether the
 * caller wants the start or the end element of the matching interval
 * (NFT_SET_ELEM_INTERVAL_END). Returns true and sets *elem on success.
 */
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* Looking for a start: remember closest-left node. */
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			/* Looking for an end: remember closest-right node. */
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			/* Exact key whose start/end kind matches what the
			 * caller asked for (no FLAGS ext implies start).
			 */
			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	/* Fall back to the tracked neighbour if its kind matches @flags. */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}
192
/* .get backend op: return the element matching elem->key (interval-end
 * selection via @flags), or ERR_PTR(-ENOENT). Lockless first pass with
 * a locked retry, mirroring nft_rbtree_lookup().
 */
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}
216
/* Insert @new into the tree, rejecting overlapping intervals.
 * Caller holds priv->lock and the seqcount write section.
 * Returns 0 on success, -EEXIST for an exact duplicate (*ext set to the
 * existing element so the caller can honour NLM_F_EXCL), or -ENOTEMPTY
 * for a partial overlap.
 */
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	bool overlap = false, dup_end_left = false, dup_end_right = false;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
	 *            '--' no nodes falling in this range
	 * b4.          >|_ _   !  (insert start before existing start)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found immediately to the left. If
	 *   there are existing nodes in between, we need to further descend the
	 *   tree before we can conclude the new start isn't causing an overlap
	 *
	 * or to b4., which, preceded by a3., means we already traversed one or
	 * more existing intervals entirely, from the right.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 *
	 * Another special case comes from the fact that start elements matching
	 * an already existing start element are allowed: insertion is not
	 * performed but we return -EEXIST in that case, and the error will be
	 * cleared by the caller if NLM_F_EXCL is not present in the request.
	 * This way, request for insertion of an exact overlap isn't reported as
	 * error to userspace if not desired.
	 *
	 * However, if the existing start matches a pre-existing start, but the
	 * end element doesn't match the corresponding pre-existing end element,
	 * we need to report a partial overlap. This is a local condition that
	 * can be noticed without need for a tracking flag, by checking for a
	 * local duplicated end for a corresponding start, from left and right,
	 * separately.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				/* Case b3: landing as leaf right after an
				 * active end element — no overlap.
				 */
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext) && !*p)
					overlap = false;
			} else {
				/* Partial overlap: duplicated end seen on the
				 * left for this start.
				 */
				if (dup_end_left && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_right = true;
					continue;
				}
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				if (dup_end_right && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_left = true;
					continue;
				}
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				/* Case a3: new start passed an existing end. */
				overlap = nft_rbtree_interval_end(rbe);
			}
		} else {
			/* Same key: disambiguate by start/end kind. */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				/* Case b5: start right before existing end. */
				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				/* Case b6: end right after existing start. */
				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}

		/* Duplicate-end tracking is purely local to adjacent nodes. */
		dup_end_left = dup_end_right = false;
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}
365
/* .insert backend op: serialize against other writers with the rwlock
 * and open a seqcount write section so lockless readers retry.
 */
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}
382
/* .remove backend op: unlink the element under writer lock + seqcount.
 * Freeing the element is the caller's responsibility.
 */
static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}
396
/* .activate backend op: commit the element into the next generation and
 * clear its GC-busy mark.
 */
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}
406
/* .flush backend op: deactivate element @priv unless the async GC
 * already claimed it (busy and still active). Returns true if the
 * element was flipped to the next generation.
 */
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}
419
/* .deactivate backend op: find the tree element matching elem->key and
 * the same start/end kind as @this, flush it, and return it; NULL if
 * not found. Runs from the control plane (no seqcount needed here —
 * presumably serialized by the transaction machinery; the tree is only
 * read, not modified).
 */
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			/* Same key but mismatched start/end kind: keep
			 * looking on the side where the right kind sorts.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}
458
/* .walk backend op: iterate active, unexpired elements in key order
 * under the read lock, invoking iter->fn on each past iter->skip.
 * Stops early (lock released) if the callback returns an error.
 */
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
491
/* Deferred-work garbage collector: batches up expired start/end element
 * pairs, erases them from the tree, then completes the batch (which
 * frees them after an RCU grace period) and re-arms itself.
 * Erasure of the current start element is deferred via @rbe_prev so
 * rb_next() is never called on an already-erased node.
 */
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		/* Remember the end element; it is collected together with
		 * the expired start element that follows it in tree order.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		/* Skip elements a concurrent transaction is working on. */
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		/* Erase the previously batched start element now that we
		 * have moved past it.
		 */
		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		/* Skip the node paired with the one just collected. */
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	/* Catch-all element GC happens outside the tree lock. */
	rbe = nft_set_catchall_gc(set);
	if (rbe) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, rbe);
	}
	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}
555
/* .privsize backend op: only the fixed-size private header is needed;
 * elements are allocated individually on insertion.
 */
static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}
561
/* .init backend op: set up the lock/seqcount pair and the empty tree;
 * arm the periodic GC only for sets with element timeouts.
 */
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}
579
/* .destroy backend op: stop the GC worker, wait for in-flight RCU
 * callbacks, then tear down and free every remaining element. No
 * locking needed — nothing else can reach the set at this point.
 */
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}
594
c50b960c
PM
595static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
596 struct nft_set_estimate *est)
597{
f3a2181e
SB
598 if (desc->field_count > 1)
599 return false;
600
c50b960c 601 if (desc->size)
080ed636
PNA
602 est->size = sizeof(struct nft_rbtree) +
603 desc->size * sizeof(struct nft_rbtree_elem);
c50b960c 604 else
080ed636 605 est->size = ~0;
c50b960c 606
55af753c 607 est->lookup = NFT_SET_CLASS_O_LOG_N;
0b5a7874 608 est->space = NFT_SET_CLASS_O_N;
c50b960c
PM
609
610 return true;
611}
612
/* rbtree set backend registration: supports interval, map, object and
 * timeout sets; element size is the offset of the extension area.
 */
const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};