]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/netfilter/nf_flow_table_core.c
netfilter: flowtable: pass flowtable to nf_flow_table_iterate()
[mirror_ubuntu-jammy-kernel.git] / net / netfilter / nf_flow_table_core.c
CommitLineData
09c434b8 1// SPDX-License-Identifier: GPL-2.0-only
ac2a6666
PNA
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/module.h>
5#include <linux/netfilter.h>
6#include <linux/rhashtable.h>
7#include <linux/netdevice.h>
4f3780c0
FF
8#include <net/ip.h>
9#include <net/ip6_route.h>
c0ea1bcb 10#include <net/netfilter/nf_tables.h>
ac2a6666
PNA
11#include <net/netfilter/nf_flow_table.h>
12#include <net/netfilter/nf_conntrack.h>
13#include <net/netfilter/nf_conntrack_core.h>
40d102cd 14#include <net/netfilter/nf_conntrack_l4proto.h>
ac2a6666
PNA
15#include <net/netfilter/nf_conntrack_tuple.h>
16
/* Serializes the global flowtable registry below: taken on table
 * registration (nf_flow_table_init), unregistration (nf_flow_table_free)
 * and by the device-cleanup walk (nf_flow_table_cleanup).
 */
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

/* Populate the flow tuple for direction @dir from the conntrack tuple
 * of the backing conntrack entry (flow->ct).
 */
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	/* Only IPv4 and IPv6 are handled; other families leave the
	 * address fields untouched.
	 */
	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	/* Ports are read through u.tcp.port for both TCP and UDP;
	 * the union members alias the same __be16 slot.
	 */
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}
45
/* Allocate a flow entry for @ct, taking a reference on the conntrack
 * entry.  Returns NULL if @ct is dying, its refcount already dropped
 * to zero, or the allocation fails.  On success the caller owns the
 * flow; flow_offload_free() releases it and drops the ct reference.
 */
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	/* Never offload an entry that is on its way out;
	 * refcount_inc_not_zero() also closes the race with a
	 * concurrent release.
	 */
	if (unlikely(nf_ct_is_dying(ct) ||
	    !refcount_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	/* GFP_ATOMIC: this can run from packet-processing context. */
	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	/* Mirror the conntrack NAT status into the flow flags so the
	 * fast path knows whether to mangle addresses/ports.
	 */
	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
76
8b9229d1
PNA
77static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
78{
79 const struct rt6_info *rt;
80
81 if (flow_tuple->l3proto == NFPROTO_IPV6) {
82 rt = (const struct rt6_info *)flow_tuple->dst_cache;
83 return rt6_get_cookie(rt);
84 }
85
86 return 0;
87}
88
/* Fill in the routing state for one direction of @flow from @route.
 * For the NEIGH/XFRM transmit paths a reference on the dst entry is
 * taken and cached; returns -1 if that reference cannot be acquired,
 * 0 on success.
 */
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	/* Cache the path MTU so the fast path can refuse oversized
	 * packets without a route lookup.
	 */
	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	/* Copy the encapsulation headers in reverse order, translating
	 * the per-index ingress-vlan bitmap as we go.
	 */
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		/* Direct xmit caches the resolved L2 addresses and
		 * interface indices instead of holding a dst entry.
		 */
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		/* These paths keep using the dst at transmit time, so
		 * pin it; the reference is dropped by
		 * nft_flow_dst_release().
		 */
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		/* IPv6 dsts need a cookie for later dst_check(). */
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}
141
7a27f6ab
PNA
142static void nft_flow_dst_release(struct flow_offload *flow,
143 enum flow_offload_tuple_dir dir)
144{
145 if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
146 flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
147 dst_release(flow->tuplehash[dir].tuple.dst_cache);
148}
149
f1363e05
PNA
150int flow_offload_route_init(struct flow_offload *flow,
151 const struct nf_flow_route *route)
152{
153 int err;
154
155 err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
156 if (err < 0)
157 return err;
158
159 err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
160 if (err < 0)
161 goto err_route_reply;
162
163 flow->type = NF_FLOW_OFFLOAD_ROUTE;
164
165 return 0;
166
167err_route_reply:
7a27f6ab 168 nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
f1363e05
PNA
169
170 return err;
171}
172EXPORT_SYMBOL_GPL(flow_offload_route_init);
173
da5984e5
FF
174static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
175{
da5984e5
FF
176 tcp->seen[0].td_maxwin = 0;
177 tcp->seen[1].td_maxwin = 0;
178}
179
/* Restore conntrack timeout bookkeeping when a flow leaves the
 * offload path.  The remaining lifetime becomes the protocol timeout
 * for the current state minus the extra offload timeout, clamped at
 * zero; ct->timeout is only ever shortened here, never extended.
 * Protocols other than TCP/UDP are left untouched.
 */
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		/* Wipe stale TCP window-tracking state. */
		flow_offload_fixup_tcp(&ct->proto.tcp);

		timeout = tn->timeouts[ct->proto.tcp.state];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	/* Only shrink the remaining lifetime; if the entry already
	 * expires sooner, leave it alone.
	 */
	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
208
/* Drop the dst references taken by flow_offload_fill_route() for
 * both directions of a route-based flow.
 */
static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}
214
215void flow_offload_free(struct flow_offload *flow)
216{
217 switch (flow->type) {
218 case NF_FLOW_OFFLOAD_ROUTE:
219 flow_offload_route_release(flow);
220 break;
221 default:
222 break;
223 }
b32d2f34 224 nf_ct_put(flow->ct);
62248df8 225 kfree_rcu(flow, rcu_head);
ac2a6666
PNA
226}
227EXPORT_SYMBOL_GPL(flow_offload_free);
228
/* rhashtable hashfn: hash only the lookup key of the tuple, i.e. the
 * fields laid out before the __hash marker in struct
 * flow_offload_tuple.
 */
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}
235
/* rhashtable obj_hashfn: same key prefix as flow_offload_hash(), but
 * starting from an inserted tuplehash object rather than a bare key.
 */
static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}
242
243static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
244 const void *ptr)
245{
246 const struct flow_offload_tuple *tuple = arg->key;
247 const struct flow_offload_tuple_rhash *x = ptr;
248
dbc859d9 249 if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
a268de77
FF
250 return 1;
251
252 return 0;
253}
254
/* rhashtable configuration: nodes are embedded in each per-direction
 * tuplehash entry; hashing and comparison cover only the tuple's
 * lookup key (see the functions above).
 */
static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset = offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn = flow_offload_hash,
	.obj_hashfn = flow_offload_hash_obj,
	.obj_cmpfn = flow_offload_hash_cmp,
	.automatic_shrinking = true,
};
262
1d91d2e1
OS
263unsigned long flow_offload_get_timeout(struct flow_offload *flow)
264{
1d91d2e1
OS
265 unsigned long timeout = NF_FLOW_TIMEOUT;
266 struct net *net = nf_ct_net(flow->ct);
267 int l4num = nf_ct_protonum(flow->ct);
268
1d91d2e1
OS
269 if (l4num == IPPROTO_TCP) {
270 struct nf_tcp_net *tn = nf_tcp_pernet(net);
271
272 timeout = tn->offload_timeout;
273 } else if (l4num == IPPROTO_UDP) {
274 struct nf_udp_net *tn = nf_udp_pernet(net);
275
276 timeout = tn->offload_timeout;
277 }
278
279 return timeout;
280}
281
/* Insert @flow into @flow_table, keyed by both direction tuples.  On
 * success the conntrack offload timeout is armed and, if the table
 * supports it, hardware offload is requested.  Returns a negative
 * errno if either rhashtable insertion fails; the first insertion is
 * rolled back when the second one fails, leaving the table consistent.
 */
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		/* Undo the first insertion before reporting failure. */
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		/* Mark before requesting HW offload so the GC knows it
		 * must go through the HW teardown path.
		 */
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
314
/* Extend the lifetime of an active flow.  Updates are throttled: the
 * timeout is only rewritten (and the hardware offload re-kicked) when
 * it would advance by more than HZ since the last stored value, to
 * avoid hammering flow->timeout and the offload workqueue per packet.
 */
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
332
/* True once the flow's timeout deadline has passed. */
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}
337
/* Unlink both direction tuples from the table and free the flow
 * (flow_offload_free() also drops the conntrack reference).
 */
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}
ac2a6666 349
/* Hand the flow back to classic conntrack processing: clear the
 * conntrack offload bit so packets traverse conntrack again, mark the
 * flow for removal by the garbage collector, and restore the
 * conntrack timeout that offloading had parked.
 */
void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
	flow_offload_fixup_ct(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
357
/* Look up the flow entry matching @tuple.  Returns NULL when there is
 * no match, or when the matching flow is being torn down or its
 * conntrack entry is dying — callers never see a flow on its way out.
 */
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	/* Recover the containing flow from the per-direction entry. */
	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
382
/* Walk every flow in @flow_table and invoke @iter on it.  Each flow
 * is visited once: only the ORIGINAL-direction entry (dir == 0) is
 * passed through.  -EAGAIN from the rhashtable walker (rehash in
 * progress) is tolerated and iteration continues; any other error
 * stops the walk and is returned.
 */
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		/* Skip the reply-direction entry of each flow. */
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
ac2a6666 417
8b9229d1
PNA
418static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
419{
420 struct dst_entry *dst;
421
422 if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
423 tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
424 dst = tuple->dst_cache;
425 if (!dst_check(dst, tuple->dst_cookie))
426 return true;
427 }
428
429 return false;
430}
431
/* True when either direction of @flow caches a stale route. */
static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}
437
/* One garbage-collector pass over a single flow.  Flows that expired,
 * whose conntrack is dying, or whose cached route went stale are torn
 * down first.  Flows marked for teardown are then removed; for
 * hardware-offloaded flows the HW deletion is requested first
 * (NF_FLOW_HW_DYING set by nf_flow_offload_del) and the software
 * entry is only released once hardware signals completion
 * (NF_FLOW_HW_DEAD).  Live hardware flows get their stats refreshed
 * instead.
 */
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		flow_offload_teardown(flow);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}
459
/* Periodic garbage-collection work: run one GC step over every flow,
 * then re-arm ourselves one second (HZ) later.
 */
static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
ac2a6666 468
/* Incrementally fix the TCP checksum for a port rewrite from @port to
 * @new_port; the port field itself is updated by the caller.
 */
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}
477
/* Incrementally fix the UDP checksum for a port rewrite from @port to
 * @new_port; the port field itself is updated by the caller.  A zero
 * UDP checksum means "no checksum", so it is only touched when one is
 * present (or the skb carries a partial checksum), and a recomputed
 * zero is folded to CSUM_MANGLED_0 to stay distinguishable.
 */
static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}
491
f4401262
PNA
492static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
493 u8 protocol, __be16 port, __be16 new_port)
ac2a6666
PNA
494{
495 switch (protocol) {
496 case IPPROTO_TCP:
f4401262 497 nf_flow_nat_port_tcp(skb, thoff, port, new_port);
ac2a6666
PNA
498 break;
499 case IPPROTO_UDP:
f4401262 500 nf_flow_nat_port_udp(skb, thoff, port, new_port);
ac2a6666
PNA
501 break;
502 }
ac2a6666
PNA
503}
504
/* Apply source-port NAT to @skb according to @flow.  In the original
 * direction the source port becomes the reply tuple's destination
 * port; in the reply direction the destination port is mapped back to
 * the original tuple's source port.  The L4 checksum is fixed up
 * afterwards via nf_flow_nat_port().
 */
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	/* TCP and UDP both start with source/dest ports, so a common
	 * flow_ports overlay suffices.
	 */
	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);
530
/* Apply destination-port NAT to @skb according to @flow — the mirror
 * of nf_flow_snat_port(): the original direction rewrites the
 * destination port from the reply tuple's source port, the reply
 * direction rewrites the source port from the original tuple's
 * destination port.  The L4 checksum is fixed up afterwards.
 */
void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
556
/* Initialise @flowtable: set up the flow block and its lock, create
 * the rhashtable, arm the periodic GC work, and register the table on
 * the global list walked by nf_flow_table_cleanup().  Returns a
 * negative errno if the rhashtable cannot be created.
 */
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
580
/* Iterator callback used when a table or device goes away.  @data is
 * the net_device being removed, or NULL to tear down every flow.
 * With a device, only flows in that device's netns whose ingress
 * interface (in either direction) matches it are torn down.
 */
static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}
596
/* Tear down all flows in @flowtable that ingress through @dev, wait
 * for the garbage collector to reap them, then flush any pending
 * hardware offload work for the table.
 */
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}
604
/* Remove all flows referencing @dev from every registered flowtable;
 * called when a network device goes away.  The registry mutex keeps
 * the table list stable during the walk.
 */
void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
615
/* Destroy @flow_table: unregister it, stop the periodic GC, drain all
 * remaining flows and release the rhashtable.
 */
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	/* Mark every remaining flow for teardown, then run a GC step
	 * to actually remove and free them.
	 */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	nf_flow_table_offload_flush(flow_table);
	/* Second GC pass for HW tables — presumably to reap entries
	 * whose hardware removal only completed during the offload
	 * flush above (NOTE(review): inferred from the DYING/DEAD
	 * handshake in nf_flow_offload_gc_step()).
	 */
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
631
/* Module init: the core only needs the hardware-offload
 * infrastructure brought up.
 */
static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");