// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
struct flow_offload_entry {
	struct flow_offload	flow;
	struct nf_conn		*ct;
	struct rcu_head		rcu_head;
};
static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
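/* Populate one direction of the flow tuple from the conntrack tuple:
 * copy addresses, ports and protocol numbers, record the path MTU from
 * the route, and take the input interface index from the other
 * direction's dst. The dst itself is cached for the fast transmit path.
 */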
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
		      struct nf_flow_route *route,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		ft->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;

	ft->iifidx = other_dst->dev->ifindex;
	ft->dst_cache = dst;
}
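/* Allocate a flow entry for the given conntrack and route pair. Takes a
 * reference on the conntrack and on both directions' dst entries, and
 * flags the flow for SNAT/DNAT according to the conntrack status bits.
 */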
struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
	struct flow_offload_entry *entry;
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto err_ct_refcnt;

	flow = &entry->flow;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
		goto err_dst_cache_original;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
		goto err_dst_cache_reply;

	entry->ct = ct;

	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		flow->flags |= FLOW_OFFLOAD_SNAT;
	if (ct->status & IPS_DST_NAT)
		flow->flags |= FLOW_OFFLOAD_DNAT;

	return flow;

err_dst_cache_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
	kfree(entry);
err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
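/* When a flow is handed back to conntrack, force the TCP state to
 * ESTABLISHED and clear the tracked window maxima, since conntrack did
 * not observe the packets that went through the offload path.
 */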
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}
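/* Conntrack timeout granted to a connection when its flow returns from
 * the offload path, so that an active connection is not expired before
 * conntrack can pick it up again.
 */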
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
	return (__s32)(timeout - (u32)jiffies);
}
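/* Clamp the remaining conntrack timeout down to the protocol's pickup
 * value for TCP and UDP; other protocols are left untouched.
 */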
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
		ct->timeout = nfct_time_stamp + timeout;
}
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}
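/* Release both cached dst entries and the conntrack reference. If the
 * flow was marked dying, also schedule deletion of the conntrack entry.
 * The entry itself is freed after an RCU grace period, since lookups
 * run under RCU.
 */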
void flow_offload_free(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
	e = container_of(flow, struct flow_offload_entry, flow);
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(e->ct, 0, 0);
	nf_ct_put(e->ct);
	kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
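/* Hash and compare only the tuple fields up to (and excluding) 'dir',
 * so that both directions of a flow can live in the same rhashtable.
 */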
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}
static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}
static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};
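/* Insert both directions of the flow into the table. If the reply
 * direction cannot be inserted, back out the original direction so the
 * table is never left with a half-inserted flow.
 */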
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}
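/* Remove a flow from the table and hand the connection back to
 * conntrack, fixing up its state and timeout depending on whether the
 * flow expired or was explicitly torn down.
 */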
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	e = container_of(flow, struct flow_offload_entry, flow);
	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(e->ct);
	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
		flow_offload_fixup_ct_timeout(e->ct);

	flow_offload_free(flow);
}
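/* Mark a flow for teardown: the garbage collector will remove it and
 * conntrack resumes state tracking for the connection.
 */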
void flow_offload_teardown(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	flow->flags |= FLOW_OFFLOAD_TEARDOWN;

	e = container_of(flow, struct flow_offload_entry, flow);
	flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
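/* Look up a flow by tuple. Flows that are dying or being torn down, or
 * whose conntrack entry is dying, are treated as misses so the packet
 * falls back to the classic forwarding path.
 */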
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	struct flow_offload_entry *e;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
		return NULL;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (unlikely(nf_ct_is_dying(e->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
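/* Walk all flows in the table and invoke the callback once per flow.
 * Only the original direction is visited, so each flow is seen once;
 * -EAGAIN from the rhashtable walker means a resize is in progress and
 * the walk simply continues.
 */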
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
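/* Garbage collection: drop flows that have expired, whose conntrack
 * entry is dying, or that were marked dying or torn down. Runs from a
 * delayed work item that reschedules itself every second.
 */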
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
		flow_offload_del(flow_table, flow);
}
static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
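/* Rewrite the transport checksum after a port change. TCP always
 * carries a checksum; for UDP a zero checksum means "no checksum", so
 * it is only updated when already set (or when the skb uses
 * CHECKSUM_PARTIAL), and a zero result is folded to CSUM_MANGLED_0.
 */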
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

	return 0;
}
static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}
static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}
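/* Source NAT: rewrite the source port on original-direction packets and
 * the destination port on replies, taking the translated port from the
 * opposite direction's tuple. nf_flow_dnat_port below is the mirror
 * image for destination NAT.
 */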
int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);
int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
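/* Initialize a flow table: set up the rhashtable, kick off the periodic
 * garbage collector and register the table on the global list.
 */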
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
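/* Cleanup callback: with no device given, tear down every flow; with a
 * device, kill only the flows in that device's netns that use it as
 * input interface in either direction.
 */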
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_dead(flow);
}
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
					  struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
}
void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_iterate_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
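/* Tear down a flow table: unlink it from the global list, stop the
 * garbage collector, then release all remaining flows in two passes
 * (one to mark them for teardown, one to delete them) before freeing
 * the rhashtable.
 */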
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);
	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");