/*
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/jhash.h>
#include <linux/netlink.h>
#include <linux/workqueue.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>

/* We target a hash table size of 4, element hint is 75% of final size */
#define NFT_HASH_ELEMENT_HINT 3

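/*
 * Per-set private data: the resizable hash table itself plus deferrable
 * work used to garbage-collect expired elements.
 */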
struct nft_hash {
        struct rhashtable ht;
        struct delayed_work gc_work;
};

struct nft_hash_elem {
        struct rhash_head node;
        struct nft_set_ext ext;
};

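/*
 * On-stack argument passed to the rhashtable lookup helpers as the "key":
 * the raw key data, the owning set (which supplies the key length) and the
 * generation mask an element must match to be considered.
 */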
struct nft_hash_cmp_arg {
        const struct nft_set *set;
        const u32 *key;
        u8 genmask;
};

static const struct rhashtable_params nft_hash_params;

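/*
 * rhashtable callbacks: nft_hash_key() hashes the caller-supplied lookup
 * argument, nft_hash_obj() hashes the key stored in an element's extension
 * area (so both hash into the same bucket), and nft_hash_cmp() treats
 * expired or generation-inactive elements as mismatches.
 */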
static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_cmp_arg *arg = data;

        return jhash(arg->key, len, seed);
}

static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct nft_hash_elem *he = data;

        return jhash(nft_set_ext_key(&he->ext), len, seed);
}

static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
                               const void *ptr)
{
        const struct nft_hash_cmp_arg *x = arg->key;
        const struct nft_hash_elem *he = ptr;

        if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
                return 1;
        if (nft_set_elem_expired(&he->ext))
                return 1;
        if (!nft_set_elem_active(&he->ext, x->genmask))
                return 1;
        return 0;
}

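/*
 * Data-path lookup: matches against the current generation and hands back
 * the element's extension area on success.
 */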
static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
                            const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_cur(net),
                .set = set,
                .key = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL)
                *ext = &he->ext;

        return !!he;
}

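/*
 * Lookup-or-create used for dynamic set updates: if the key is absent in
 * any generation, a new element is allocated via new() and inserted.
 * rhashtable_lookup_get_insert_key() returns the existing element when
 * another CPU wins the race to insert the same key, in which case the
 * freshly allocated element is destroyed and the winner is used instead.
 */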
static bool nft_hash_update(struct nft_set *set, const u32 *key,
                            void *(*new)(struct nft_set *,
                                         const struct nft_expr *,
                                         struct nft_regs *regs),
                            const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he, *prev;
        struct nft_hash_cmp_arg arg = {
                .genmask = NFT_GENMASK_ANY,
                .set = set,
                .key = key,
        };

        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL)
                goto out;

        he = new(set, expr, regs);
        if (he == NULL)
                goto err1;

        prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
                                                nft_hash_params);
        if (IS_ERR(prev))
                goto err2;

        /* Another cpu may race to insert the element with the same key */
        if (prev) {
                nft_set_elem_destroy(set, he, true);
                he = prev;
        }

out:
        *ext = &he->ext;
        return true;

err2:
        nft_set_elem_destroy(set, he, true);
err1:
        return false;
}

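/*
 * Transaction-path insertion: the element is matched against the next
 * generation; on a clash the existing element is reported back through
 * *ext and -EEXIST is returned.
 */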
static int nft_hash_insert(const struct net *net, const struct nft_set *set,
                           const struct nft_set_elem *elem,
                           struct nft_set_ext **ext)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_next(net),
                .set = set,
                .key = elem->key.val.data,
        };
        struct nft_hash_elem *prev;

        prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
                                                nft_hash_params);
        if (IS_ERR(prev))
                return PTR_ERR(prev);
        if (prev) {
                *ext = &prev->ext;
                return -EEXIST;
        }
        return 0;
}

static void nft_hash_activate(const struct net *net, const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_hash_elem *he = elem->priv;

        nft_set_elem_change_active(net, set, &he->ext);
        nft_set_elem_clear_busy(&he->ext);
}

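/*
 * Deactivation flips an element to inactive in the next generation.
 * nft_hash_deactivate_one() refuses only if the element is both already
 * marked busy and still active, so concurrent transactions cannot claim
 * the same element twice; nft_hash_deactivate() looks the element up by
 * key first.
 */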
static bool nft_hash_deactivate_one(const struct net *net,
                                    const struct nft_set *set, void *priv)
{
        struct nft_hash_elem *he = priv;

        if (!nft_set_elem_mark_busy(&he->ext) ||
            !nft_is_active(net, &he->ext)) {
                nft_set_elem_change_active(net, set, &he->ext);
                return true;
        }
        return false;
}

static void *nft_hash_deactivate(const struct net *net,
                                 const struct nft_set *set,
                                 const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct nft_hash_cmp_arg arg = {
                .genmask = nft_genmask_next(net),
                .set = set,
                .key = elem->key.val.data,
        };

        rcu_read_lock();
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL &&
            !nft_hash_deactivate_one(net, set, he))
                he = NULL;

        rcu_read_unlock();

        return he;
}

static void nft_hash_remove(const struct net *net,
                            const struct nft_set *set,
                            const struct nft_set_elem *elem)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he = elem->priv;

        rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
}

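/*
 * Walk all elements for dumps and flushes using the rhashtable iterator.
 * -EAGAIN from the iterator only signals an internal resize and iteration
 * simply continues; the first iter->skip entries and any expired or
 * generation-inactive elements are skipped.
 */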
static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_iter *iter)
{
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
        int err;

        err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
        iter->err = err;
        if (err)
                return;

        err = rhashtable_walk_start(&hti);
        if (err && err != -EAGAIN) {
                iter->err = err;
                goto out;
        }

        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
                        err = PTR_ERR(he);
                        if (err != -EAGAIN) {
                                iter->err = err;
                                goto out;
                        }

                        continue;
                }

                if (iter->count < iter->skip)
                        goto cont;
                if (nft_set_elem_expired(&he->ext))
                        goto cont;
                if (!nft_set_elem_active(&he->ext, iter->genmask))
                        goto cont;

                elem.priv = he;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0)
                        goto out;

cont:
                iter->count++;
        }

out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);
}

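/*
 * Periodic garbage collection: walk the table, batch up elements that have
 * expired and are not marked busy, unlink them and drop the set's element
 * count, then requeue the work for the next GC interval.
 */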
static void nft_hash_gc(struct work_struct *work)
{
        struct nft_set *set;
        struct nft_hash_elem *he;
        struct nft_hash *priv;
        struct nft_set_gc_batch *gcb = NULL;
        struct rhashtable_iter hti;
        int err;

        priv = container_of(work, struct nft_hash, gc_work.work);
        set = nft_set_container_of(priv);

        err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
        if (err)
                goto schedule;

        err = rhashtable_walk_start(&hti);
        if (err && err != -EAGAIN)
                goto out;

        while ((he = rhashtable_walk_next(&hti))) {
                if (IS_ERR(he)) {
                        if (PTR_ERR(he) != -EAGAIN)
                                goto out;
                        continue;
                }

                if (!nft_set_elem_expired(&he->ext))
                        continue;
                if (nft_set_elem_mark_busy(&he->ext))
                        continue;

                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (gcb == NULL)
                        goto out;
                rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, he);
        }
out:
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        nft_set_gc_batch_complete(gcb);
schedule:
        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
}

static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
{
        return sizeof(struct nft_hash);
}

static const struct rhashtable_params nft_hash_params = {
        .head_offset = offsetof(struct nft_hash_elem, node),
        .hashfn = nft_hash_key,
        .obj_hashfn = nft_hash_obj,
        .obj_cmpfn = nft_hash_cmp,
        .automatic_shrinking = true,
};

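/*
 * Backend initialisation: start from the shared parameter template, size
 * the table from the userspace element hint (or the default), set the key
 * length, and start the GC work only for sets that support timeouts.
 */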
static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
{
        struct nft_hash *priv = nft_set_priv(set);
        struct rhashtable_params params = nft_hash_params;
        int err;

        params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
        params.key_len = set->klen;

        err = rhashtable_init(&priv->ht, &params);
        if (err < 0)
                return err;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));
        return 0;
}

static void nft_hash_elem_destroy(void *ptr, void *arg)
{
        nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
}

static void nft_hash_destroy(const struct nft_set *set)
{
        struct nft_hash *priv = nft_set_priv(set);

        cancel_delayed_work_sync(&priv->gc_work);
        rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
                                    (void *)set);
}

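/*
 * Memory/complexity estimate reported to the set backend selection logic.
 * With an explicit size the bucket array is sized at 4/3 of the element
 * count, rounded up to a power of two.  Illustrative example (assumed
 * numbers, 8-byte pointers): for desc->size == 1024,
 * roundup_pow_of_two(1024 * 4 / 3) == 2048 buckets, so
 * est->size == sizeof(struct nft_hash) + 2048 * 8 +
 * 1024 * sizeof(struct nft_hash_elem).
 */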
static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                              struct nft_set_estimate *est)
{
        unsigned int esize;

        esize = sizeof(struct nft_hash_elem);
        if (desc->size) {
                est->size = sizeof(struct nft_hash) +
                            roundup_pow_of_two(desc->size * 4 / 3) *
                            sizeof(struct nft_hash_elem *) +
                            desc->size * esize;
        } else {
                /* Resizing happens when the load drops below 30% or goes
                 * above 75%. The average of 52.5% load (approximated by 50%)
                 * is used for the size estimation of the hash buckets,
                 * meaning we calculate two buckets per element.
                 */
                est->size = esize + 2 * sizeof(struct nft_hash_elem *);
        }

        est->class = NFT_SET_CLASS_O_1;

        return true;
}

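/*
 * Set backend registered with nf_tables.  .elemsize is the offset of the
 * extension area within struct nft_hash_elem, i.e. the amount of
 * backend-private space that precedes each element's extensions.
 */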
static struct nft_set_ops nft_hash_ops __read_mostly = {
        .privsize = nft_hash_privsize,
        .elemsize = offsetof(struct nft_hash_elem, ext),
        .estimate = nft_hash_estimate,
        .init = nft_hash_init,
        .destroy = nft_hash_destroy,
        .insert = nft_hash_insert,
        .activate = nft_hash_activate,
        .deactivate = nft_hash_deactivate,
        .deactivate_one = nft_hash_deactivate_one,
        .remove = nft_hash_remove,
        .lookup = nft_hash_lookup,
        .update = nft_hash_update,
        .walk = nft_hash_walk,
        .features = NFT_SET_MAP | NFT_SET_TIMEOUT,
        .owner = THIS_MODULE,
};

static int __init nft_hash_module_init(void)
{
        return nft_register_set(&nft_hash_ops);
}

static void __exit nft_hash_module_exit(void)
{
        nft_unregister_set(&nft_hash_ops);
}

module_init(nft_hash_module_init);
module_exit(nft_hash_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_SET();