]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/netfilter/nft_set_hash.c
Merge tag 'pci-v4.10-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[mirror_ubuntu-zesty-kernel.git] / net / netfilter / nft_set_hash.c
1 /*
2 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Development of this code funded by Astaro AG (http://www.astaro.com/)
9 */
10
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/list.h>
15 #include <linux/log2.h>
16 #include <linux/jhash.h>
17 #include <linux/netlink.h>
18 #include <linux/workqueue.h>
19 #include <linux/rhashtable.h>
20 #include <linux/netfilter.h>
21 #include <linux/netfilter/nf_tables.h>
22 #include <net/netfilter/nf_tables.h>
23
/* We target a hash table size of 4, element hint is 75% of final size.
 * Used as the default rhashtable nelem_hint when userspace gives no size.
 */
#define NFT_HASH_ELEMENT_HINT 3
26
/* Per-set private data: the resizable hash table holding the elements,
 * plus the deferrable work that periodically reaps expired entries.
 */
struct nft_hash {
	struct rhashtable		ht;	/* element storage */
	struct delayed_work		gc_work; /* periodic expiry collection */
};
31
/* One set element: rhashtable linkage followed by the variable-size
 * extension area (key, expiration, flags, ...). ext must stay last --
 * nft_hash_ops.elemsize points at its offset.
 */
struct nft_hash_elem {
	struct rhash_head		node;	/* rhashtable bucket linkage */
	struct nft_set_ext		ext;	/* key + per-element extensions */
};
36
/* Lookup descriptor passed through rhashtable to nft_hash_key() and
 * nft_hash_cmp(): the raw key plus the generation mask deciding which
 * element generations count as a match.
 */
struct nft_hash_cmp_arg {
	const struct nft_set		*set;	/* owning set (for klen) */
	const u32			*key;	/* key data to match */
	u8				genmask; /* generation filter */
};
42
43 static const struct rhashtable_params nft_hash_params;
44
45 static inline u32 nft_hash_key(const void *data, u32 len, u32 seed)
46 {
47 const struct nft_hash_cmp_arg *arg = data;
48
49 return jhash(arg->key, len, seed);
50 }
51
52 static inline u32 nft_hash_obj(const void *data, u32 len, u32 seed)
53 {
54 const struct nft_hash_elem *he = data;
55
56 return jhash(nft_set_ext_key(&he->ext), len, seed);
57 }
58
59 static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
60 const void *ptr)
61 {
62 const struct nft_hash_cmp_arg *x = arg->key;
63 const struct nft_hash_elem *he = ptr;
64
65 if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
66 return 1;
67 if (nft_set_elem_expired(&he->ext))
68 return 1;
69 if (!nft_set_elem_active(&he->ext, x->genmask))
70 return 1;
71 return 0;
72 }
73
74 static bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
75 const u32 *key, const struct nft_set_ext **ext)
76 {
77 struct nft_hash *priv = nft_set_priv(set);
78 const struct nft_hash_elem *he;
79 struct nft_hash_cmp_arg arg = {
80 .genmask = nft_genmask_cur(net),
81 .set = set,
82 .key = key,
83 };
84
85 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
86 if (he != NULL)
87 *ext = &he->ext;
88
89 return !!he;
90 }
91
/* Datapath lookup-or-create (used by dynamic sets): find an element for
 * @key in any generation, or build a fresh one via @new and insert it.
 * Returns true with *ext pointing at the element on success, false if
 * allocation or insertion failed.
 */
static bool nft_hash_update(struct nft_set *set, const u32 *key,
			    void *(*new)(struct nft_set *,
					 const struct nft_expr *,
					 struct nft_regs *regs),
			    const struct nft_expr *expr,
			    struct nft_regs *regs,
			    const struct nft_set_ext **ext)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_elem *he, *prev;
	struct nft_hash_cmp_arg arg = {
		/* NFT_GENMASK_ANY: the datapath must see an element no
		 * matter which generation it belongs to.
		 */
		.genmask = NFT_GENMASK_ANY,
		.set	 = set,
		.key	 = key,
	};

	/* Fast path: element already present. */
	he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
	if (he != NULL)
		goto out;

	he = new(set, expr, regs);
	if (he == NULL)
		goto err1;

	/* Atomic lookup-or-insert: returns an existing element with the
	 * same key if one beat us to it, NULL on clean insert, or an
	 * ERR_PTR on failure.
	 */
	prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
						nft_hash_params);
	if (IS_ERR(prev))
		goto err2;

	/* Another cpu may race to insert the element with the same key */
	if (prev) {
		nft_set_elem_destroy(set, he, true);
		he = prev;
	}

out:
	*ext = &he->ext;
	return true;

err2:
	nft_set_elem_destroy(set, he, true);
err1:
	return false;
}
136
137 static int nft_hash_insert(const struct net *net, const struct nft_set *set,
138 const struct nft_set_elem *elem,
139 struct nft_set_ext **ext)
140 {
141 struct nft_hash *priv = nft_set_priv(set);
142 struct nft_hash_elem *he = elem->priv;
143 struct nft_hash_cmp_arg arg = {
144 .genmask = nft_genmask_next(net),
145 .set = set,
146 .key = elem->key.val.data,
147 };
148 struct nft_hash_elem *prev;
149
150 prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
151 nft_hash_params);
152 if (IS_ERR(prev))
153 return PTR_ERR(prev);
154 if (prev) {
155 *ext = &prev->ext;
156 return -EEXIST;
157 }
158 return 0;
159 }
160
161 static void nft_hash_activate(const struct net *net, const struct nft_set *set,
162 const struct nft_set_elem *elem)
163 {
164 struct nft_hash_elem *he = elem->priv;
165
166 nft_set_elem_change_active(net, set, &he->ext);
167 nft_set_elem_clear_busy(&he->ext);
168 }
169
170 static bool nft_hash_deactivate_one(const struct net *net,
171 const struct nft_set *set, void *priv)
172 {
173 struct nft_hash_elem *he = priv;
174
175 if (!nft_set_elem_mark_busy(&he->ext) ||
176 !nft_is_active(net, &he->ext)) {
177 nft_set_elem_change_active(net, set, &he->ext);
178 return true;
179 }
180 return false;
181 }
182
183 static void *nft_hash_deactivate(const struct net *net,
184 const struct nft_set *set,
185 const struct nft_set_elem *elem)
186 {
187 struct nft_hash *priv = nft_set_priv(set);
188 struct nft_hash_elem *he;
189 struct nft_hash_cmp_arg arg = {
190 .genmask = nft_genmask_next(net),
191 .set = set,
192 .key = elem->key.val.data,
193 };
194
195 rcu_read_lock();
196 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
197 if (he != NULL &&
198 !nft_hash_deactivate_one(net, set, he))
199 he = NULL;
200
201 rcu_read_unlock();
202
203 return he;
204 }
205
206 static void nft_hash_remove(const struct nft_set *set,
207 const struct nft_set_elem *elem)
208 {
209 struct nft_hash *priv = nft_set_priv(set);
210 struct nft_hash_elem *he = elem->priv;
211
212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
213 }
214
/* Iterate over all active, non-expired elements, calling iter->fn on
 * each. Errors are reported through iter->err. The rhashtable walker
 * can hand back ERR_PTR(-EAGAIN) mid-walk when the table is resizing
 * concurrently; those entries are skipped and the walk carries on.
 */
static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
			  struct nft_set_iter *iter)
{
	struct nft_hash *priv = nft_set_priv(set);
	struct nft_hash_elem *he;
	struct rhashtable_iter hti;
	struct nft_set_elem elem;
	int err;

	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
	iter->err = err;
	if (err)
		return;

	/* -EAGAIN from walk_start just means a resize was in progress;
	 * the walk is still valid.
	 */
	err = rhashtable_walk_start(&hti);
	if (err && err != -EAGAIN) {
		iter->err = err;
		goto out;
	}

	while ((he = rhashtable_walk_next(&hti))) {
		if (IS_ERR(he)) {
			err = PTR_ERR(he);
			if (err != -EAGAIN) {
				iter->err = err;
				goto out;
			}

			continue;
		}

		/* Honour the caller's resume offset, and hide expired or
		 * generation-inactive elements from the dump.
		 */
		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&he->ext))
			goto cont;
		if (!nft_set_elem_active(&he->ext, iter->genmask))
			goto cont;

		elem.priv = he;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0)
			goto out;

cont:
		iter->count++;
	}

out:
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);
}
267
/* Periodic garbage collection: walk the table, remove expired elements
 * that are not busy, and batch-free them. Always re-arms itself at the
 * set's GC interval, even if this pass failed to start a walk.
 */
static void nft_hash_gc(struct work_struct *work)
{
	struct nft_set *set;
	struct nft_hash_elem *he;
	struct nft_hash *priv;
	struct nft_set_gc_batch *gcb = NULL;
	struct rhashtable_iter hti;
	int err;

	priv = container_of(work, struct nft_hash, gc_work.work);
	set  = nft_set_container_of(priv);

	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
	if (err)
		goto schedule;

	err = rhashtable_walk_start(&hti);
	if (err && err != -EAGAIN)
		goto out;

	while ((he = rhashtable_walk_next(&hti))) {
		/* -EAGAIN: concurrent resize; skip and keep walking. */
		if (IS_ERR(he)) {
			if (PTR_ERR(he) != -EAGAIN)
				goto out;
			continue;
		}

		if (!nft_set_elem_expired(&he->ext))
			continue;
		/* Busy means a transaction owns the element; leave it. */
		if (nft_set_elem_mark_busy(&he->ext))
			continue;

		/* GFP_ATOMIC: we hold the walker / RCU context here. */
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb == NULL)
			goto out;
		rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, he);
	}
out:
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	nft_set_gc_batch_complete(gcb);
schedule:
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}
316
/* Size of the per-set private area allocated by the nf_tables core. */
static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
{
	return sizeof(struct nft_hash);
}
321
/* rhashtable configuration shared by all nft hash sets. key_len and
 * nelem_hint are filled in per set by nft_hash_init(); a custom hashfn
 * pair is needed because lookups hash a cmp_arg while stored objects
 * hash their embedded extension key.
 */
static const struct rhashtable_params nft_hash_params = {
	.head_offset		= offsetof(struct nft_hash_elem, node),
	.hashfn			= nft_hash_key,
	.obj_hashfn		= nft_hash_obj,
	.obj_cmpfn		= nft_hash_cmp,
	.automatic_shrinking	= true,
};
329
/* Set-up callback: initialize the hash table with a per-set key length
 * and size hint, and arm periodic GC if the set supports timeouts.
 */
static int nft_hash_init(const struct nft_set *set,
			 const struct nft_set_desc *desc,
			 const struct nlattr * const tb[])
{
	struct nft_hash *priv = nft_set_priv(set);
	struct rhashtable_params params = nft_hash_params;
	int err;

	/* Use the userspace-provided size when given, otherwise the
	 * small default hint.
	 */
	params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT;
	params.key_len	  = set->klen;

	err = rhashtable_init(&priv->ht, &params);
	if (err < 0)
		return err;

	/* gc_work is initialized unconditionally so that destroy's
	 * cancel_delayed_work_sync() is always safe to call.
	 */
	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_hash_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));
	return 0;
}
351
352 static void nft_hash_elem_destroy(void *ptr, void *arg)
353 {
354 nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
355 }
356
/* Tear-down callback: the GC work must be cancelled (and finished)
 * before the table and its elements are freed underneath it.
 */
static void nft_hash_destroy(const struct nft_set *set)
{
	struct nft_hash *priv = nft_set_priv(set);

	cancel_delayed_work_sync(&priv->gc_work);
	rhashtable_free_and_destroy(&priv->ht, nft_hash_elem_destroy,
				    (void *)set);
}
365
366 static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
367 struct nft_set_estimate *est)
368 {
369 unsigned int esize;
370
371 esize = sizeof(struct nft_hash_elem);
372 if (desc->size) {
373 est->size = sizeof(struct nft_hash) +
374 roundup_pow_of_two(desc->size * 4 / 3) *
375 sizeof(struct nft_hash_elem *) +
376 desc->size * esize;
377 } else {
378 /* Resizing happens when the load drops below 30% or goes
379 * above 75%. The average of 52.5% load (approximated by 50%)
380 * is used for the size estimation of the hash buckets,
381 * meaning we calculate two buckets per element.
382 */
383 est->size = esize + 2 * sizeof(struct nft_hash_elem *);
384 }
385
386 est->class = NFT_SET_CLASS_O_1;
387
388 return true;
389 }
390
/* Backend registration: the set operations exported to the nf_tables
 * core. Supports map sets and per-element timeouts.
 */
static struct nft_set_ops nft_hash_ops __read_mostly = {
	.privsize       = nft_hash_privsize,
	.elemsize       = offsetof(struct nft_hash_elem, ext),
	.estimate       = nft_hash_estimate,
	.init           = nft_hash_init,
	.destroy        = nft_hash_destroy,
	.insert         = nft_hash_insert,
	.activate       = nft_hash_activate,
	.deactivate     = nft_hash_deactivate,
	.deactivate_one = nft_hash_deactivate_one,
	.remove         = nft_hash_remove,
	.lookup         = nft_hash_lookup,
	.update         = nft_hash_update,
	.walk           = nft_hash_walk,
	.features       = NFT_SET_MAP | NFT_SET_TIMEOUT,
	.owner          = THIS_MODULE,
};
408
/* Register the hash set backend with the nf_tables core on load. */
static int __init nft_hash_module_init(void)
{
	return nft_register_set(&nft_hash_ops);
}
413
/* Unregister the backend on module unload. */
static void __exit nft_hash_module_exit(void)
{
	nft_unregister_set(&nft_hash_ops);
}
418
module_init(nft_hash_module_init);
module_exit(nft_hash_module_exit);

/* Generic set alias: this backend can serve any set type request. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_SET();