// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

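/*
 * Copy one direction of the conntrack tuple into the flow tuple that
 * serves as the rhashtable lookup key. Ports are read through
 * u.tcp.port, which aliases the UDP port via the union, so the same
 * code covers both TCP and UDP.
 */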
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}

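/*
 * Allocate a flow entry for an established conntrack. A reference on
 * the conntrack is taken up front (and dropped on failure) so the flow
 * can never outlive its ct; GFP_ATOMIC is used because this runs from
 * the packet path.
 *
 * Rough usage sketch (error handling elided; this mirrors how the nft
 * flow_offload expression drives the API):
 *
 *	flow = flow_offload_alloc(ct);
 *	if (flow && flow_offload_route_init(flow, &route) == 0)
 *		flow_offload_add(flowtable, flow);
 */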
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

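/*
 * Fill in the transmit path for one direction: cache the forwarding
 * MTU, record the input ifindex, and either copy the resolved Ethernet
 * addresses plus output ifindex (XMIT_DIRECT, typically resolved via
 * dev_fill_forward_path()) or take a dst reference (XMIT_NEIGH and
 * XMIT_XFRM).
 */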
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

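/* Only the NEIGH and XFRM xmit types hold a dst reference to put back. */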
static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

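/*
 * When a flow leaves the table, conntrack must take over again. The TCP
 * state is reset to ESTABLISHED with window tracking disabled
 * (td_maxwin = 0), and the ct timeout is clamped to a short "pickup"
 * window so stale entries do not linger with the long offload timeout.
 */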
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

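/*
 * Release everything a flow owns: the per-direction dsts for route
 * flows, the conntrack reference, and finally the flow itself via RCU.
 */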
static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

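/*
 * The rhashtable key is the leading part of struct flow_offload_tuple
 * up to the __hash marker field; fields behind the marker (direction,
 * xmit data, cached dst) are per-entry state and must not influence the
 * hash or the compare callback.
 */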
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

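/*
 * Insert both direction tuples into the table. If the reply-direction
 * insert fails, the original direction is backed out so the table never
 * holds a half-added flow. The ct timeout is extended while the flow is
 * offloaded, and hardware offload is requested if the flowtable has it
 * enabled.
 */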
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

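/*
 * Called for every packet that hits an offloaded flow: rebase the flow
 * timeout and, if a hardware refresh was flagged, re-issue the offload
 * add request.
 */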
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	if (likely(!nf_flowtable_hw_offload(flow_table) ||
		   !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

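/*
 * Unlink both directions, hand the connection back to conntrack
 * (clearing IPS_OFFLOAD and fixing up state/timeout) and free the flow.
 * Only the garbage collector in this file calls this.
 */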
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

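/*
 * Look up a flow by tuple. Flows already marked for teardown, or whose
 * conntrack is dying, are treated as absent so the packet falls back to
 * the classic forwarding path.
 */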
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

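/*
 * Walk all flows in the table. Only the ORIGINAL-direction entry is
 * visited so each flow is seen exactly once; -EAGAIN from the walker
 * (a resize in progress) is simply retried.
 */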
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

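/*
 * One garbage-collector pass over a flow: expired flows and flows whose
 * conntrack is dying are flagged for teardown. Tearing down a
 * hardware-offloaded flow is a two-step dance (HW_DYING, then HW_DEAD)
 * because the driver completes the removal asynchronously; software
 * flows are deleted directly. Live hardware flows get their stats
 * refreshed instead.
 */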
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

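/*
 * Port NAT helpers. The L4 checksum is fixed up incrementally via
 * inet_proto_csum_replace2(); for UDP, a checksum that folds to zero is
 * stored as CSUM_MANGLED_0, since zero on the wire means "no checksum".
 */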
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

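/*
 * Port rewriting takes the new value from the opposite direction's
 * tuple: for SNAT in the original direction, the new source port is the
 * reply tuple's destination port, and vice versa.
 */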
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

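/*
 * Set up a flowtable: deferrable gc work on the power-efficient
 * workqueue (run every second), the rhashtable, and registration on the
 * global flowtable list used for device cleanup.
 */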
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

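/*
 * Device/netns cleanup: a NULL device tears down every flow; otherwise
 * only flows in the device's netns that enter through that device are
 * torn down, after which pending gc and hardware offload work is
 * flushed.
 */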
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

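/*
 * Flowtable teardown: unregister from the global list, stop the gc
 * worker, mark every remaining flow for teardown and reap it, flush
 * hardware offload work, then run the gc step once more so hardware
 * removals that completed asynchronously are collected before the
 * rhashtable is destroyed.
 */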
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");