net/netfilter/nf_flow_table_ip.c (mirror_ubuntu-jammy-kernel.git, at commit "netfilter: flowtable: use dev_fill_forward_path() to obtain egress device")
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

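/* Tear down the flow entry when a TCP FIN or RST is seen, so this packet
 * and the remainder of the connection fall back to the standard conntrack
 * path.
 */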
static int nf_flow_state_check(struct flow_offload *flow, int proto,
                               struct sk_buff *skb, unsigned int thoff)
{
        struct tcphdr *tcph;

        if (proto != IPPROTO_TCP)
                return 0;

        tcph = (void *)(skb_network_header(skb) + thoff);
        if (unlikely(tcph->fin || tcph->rst)) {
                flow_offload_teardown(flow);
                return -1;
        }

        return 0;
}

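/* Fix up the TCP checksum after an IPv4 address has been rewritten. */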
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
                               __be32 addr, __be32 new_addr)
{
        struct tcphdr *tcph;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

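/* Fix up the UDP checksum after an IPv4 address has been rewritten. The
 * checksum is only updated when one is in use (non-zero) or still to be
 * computed by hardware (CHECKSUM_PARTIAL); a recomputed value of zero is
 * folded to CSUM_MANGLED_0, since zero means "no checksum" for UDP.
 */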
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
                               __be32 addr, __be32 new_addr)
{
        struct udphdr *udph;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace4(&udph->check, skb, addr,
                                         new_addr, true);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
                                   unsigned int thoff, __be32 addr,
                                   __be32 new_addr)
{
        switch (iph->protocol) {
        case IPPROTO_TCP:
                nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
                break;
        case IPPROTO_UDP:
                nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
                break;
        }
}

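/* Source NAT for IPv4: in the original direction the source address is
 * rewritten to the reply tuple's destination; in the reply direction the
 * destination address is rewritten to the original tuple's source. Both
 * the IP header checksum and the layer 4 checksum are updated.
 */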
static void nf_flow_snat_ip(const struct flow_offload *flow,
                            struct sk_buff *skb, struct iphdr *iph,
                            unsigned int thoff, enum flow_offload_tuple_dir dir)
{
        __be32 addr, new_addr;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = iph->saddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
                iph->saddr = new_addr;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = iph->daddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
                iph->daddr = new_addr;
                break;
        }
        csum_replace4(&iph->check, addr, new_addr);

        nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

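/* Destination NAT for IPv4: the mirror image of nf_flow_snat_ip(). */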
static void nf_flow_dnat_ip(const struct flow_offload *flow,
                            struct sk_buff *skb, struct iphdr *iph,
                            unsigned int thoff, enum flow_offload_tuple_dir dir)
{
        __be32 addr, new_addr;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = iph->daddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
                iph->daddr = new_addr;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = iph->saddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
                iph->saddr = new_addr;
                break;
        }
        csum_replace4(&iph->check, addr, new_addr);

        nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

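/* Apply any SNAT/DNAT recorded in the flow entry: ports first, then
 * addresses, each only when the corresponding flow flag is set.
 */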
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
                           unsigned int thoff, enum flow_offload_tuple_dir dir,
                           struct iphdr *iph)
{
        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
                nf_flow_snat_ip(flow, skb, iph, thoff, dir);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
                nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
        }
}

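/* A transport header offset that differs from the basic 20-byte IPv4
 * header means IP options are present; such packets are not taken into
 * the fast path.
 */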
static bool ip_has_options(unsigned int thoff)
{
        return thoff != sizeof(struct iphdr);
}

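/* Build a flow table lookup tuple from an IPv4 packet. Fragments, packets
 * with IP options, non-TCP/UDP protocols and packets whose TTL is about to
 * expire are rejected so they take the regular forwarding path.
 */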
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
                            struct flow_offload_tuple *tuple, u32 *hdrsize)
{
        struct flow_ports *ports;
        unsigned int thoff;
        struct iphdr *iph;

        if (!pskb_may_pull(skb, sizeof(*iph)))
                return -1;

        iph = ip_hdr(skb);
        thoff = iph->ihl * 4;

        if (ip_is_fragment(iph) ||
            unlikely(ip_has_options(thoff)))
                return -1;

        switch (iph->protocol) {
        case IPPROTO_TCP:
                *hdrsize = sizeof(struct tcphdr);
                break;
        case IPPROTO_UDP:
                *hdrsize = sizeof(struct udphdr);
                break;
        default:
                return -1;
        }

        if (iph->ttl <= 1)
                return -1;

        thoff = iph->ihl * 4;
        if (!pskb_may_pull(skb, thoff + *hdrsize))
                return -1;

        iph = ip_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

        tuple->src_v4.s_addr = iph->saddr;
        tuple->dst_v4.s_addr = iph->daddr;
        tuple->src_port = ports->source;
        tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET;
        tuple->l4proto = iph->protocol;
        tuple->iifidx = dev->ifindex;

        return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
        if (skb->len <= mtu)
                return false;

        if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                return false;

        return true;
}

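/* Transmit via dst_output() for flows with an xfrm (IPsec) transformation
 * attached to the cached dst entry.
 */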
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
                                      const struct nf_hook_state *state,
                                      struct dst_entry *dst)
{
        skb_orphan(skb);
        skb_dst_set_noref(skb, dst);
        dst_output(state->net, state->sk, skb);
        return NF_STOLEN;
}

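/* Direct transmission: resolve the egress device cached in the tuple,
 * build the link-layer header from the cached addresses and queue the
 * packet on that device.
 */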
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
                                       const struct flow_offload_tuple_rhash *tuplehash,
                                       unsigned short type)
{
        struct net_device *outdev;

        outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
        if (!outdev)
                return NF_DROP;

        skb->dev = outdev;
        dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
                        tuplehash->tuple.out.h_source, skb->len);
        dev_queue_xmit(skb);

        return NF_STOLEN;
}

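/* IPv4 fast path hook: look up the flow table entry for the packet,
 * validate the cached route, apply NAT, decrement the TTL and transmit
 * via the neighbour layer, the xfrm output path or directly to the egress
 * device, bypassing the rest of the IPv4 forwarding path.
 */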
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
                        const struct nf_hook_state *state)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct nf_flowtable *flow_table = priv;
        struct flow_offload_tuple tuple = {};
        enum flow_offload_tuple_dir dir;
        struct flow_offload *flow;
        struct net_device *outdev;
        struct rtable *rt;
        unsigned int thoff;
        struct iphdr *iph;
        __be32 nexthop;
        u32 hdrsize;
        int ret;

        if (skb->protocol != htons(ETH_P_IP))
                return NF_ACCEPT;

        if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize) < 0)
                return NF_ACCEPT;

        tuplehash = flow_offload_lookup(flow_table, &tuple);
        if (tuplehash == NULL)
                return NF_ACCEPT;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

        if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
                return NF_ACCEPT;

        iph = ip_hdr(skb);
        thoff = iph->ihl * 4;
        if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
                return NF_ACCEPT;

        if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
            tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
                rt = (struct rtable *)tuplehash->tuple.dst_cache;
                if (!dst_check(&rt->dst, 0)) {
                        flow_offload_teardown(flow);
                        return NF_ACCEPT;
                }
        }

        if (skb_try_make_writable(skb, thoff + hdrsize))
                return NF_DROP;

        flow_offload_refresh(flow_table, flow);

        iph = ip_hdr(skb);
        nf_flow_nat_ip(flow, skb, thoff, dir, iph);

        ip_decrease_ttl(iph);
        skb->tstamp = 0;

        if (flow_table->flags & NF_FLOWTABLE_COUNTER)
                nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

        if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
                memset(skb->cb, 0, sizeof(struct inet_skb_parm));
                IPCB(skb)->iif = skb->dev->ifindex;
                IPCB(skb)->flags = IPSKB_FORWARDED;
                return nf_flow_xmit_xfrm(skb, state, &rt->dst);
        }

        switch (tuplehash->tuple.xmit_type) {
        case FLOW_OFFLOAD_XMIT_NEIGH:
                outdev = rt->dst.dev;
                skb->dev = outdev;
                nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
                skb_dst_set_noref(skb, &rt->dst);
                neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
                ret = NF_STOLEN;
                break;
        case FLOW_OFFLOAD_XMIT_DIRECT:
                ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
                if (ret == NF_DROP)
                        flow_offload_teardown(flow);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

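/* The helpers below mirror the IPv4 ones above for IPv6: checksum fixups
 * after address rewriting, SNAT/DNAT of addresses, the tuple extractor and
 * the fast path hook itself.
 */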
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
                                 struct in6_addr *addr,
                                 struct in6_addr *new_addr,
                                 struct ipv6hdr *ip6h)
{
        struct tcphdr *tcph;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
                                  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
                                 struct in6_addr *addr,
                                 struct in6_addr *new_addr)
{
        struct udphdr *udph;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
                                          new_addr->s6_addr32, true);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
                                     unsigned int thoff, struct in6_addr *addr,
                                     struct in6_addr *new_addr)
{
        switch (ip6h->nexthdr) {
        case IPPROTO_TCP:
                nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
                break;
        case IPPROTO_UDP:
                nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
                break;
        }
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
                              struct sk_buff *skb, struct ipv6hdr *ip6h,
                              unsigned int thoff,
                              enum flow_offload_tuple_dir dir)
{
        struct in6_addr addr, new_addr;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = ip6h->saddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
                ip6h->saddr = new_addr;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = ip6h->daddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
                ip6h->daddr = new_addr;
                break;
        }

        nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
                              struct sk_buff *skb, struct ipv6hdr *ip6h,
                              unsigned int thoff,
                              enum flow_offload_tuple_dir dir)
{
        struct in6_addr addr, new_addr;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = ip6h->daddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
                ip6h->daddr = new_addr;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = ip6h->saddr;
                new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
                ip6h->saddr = new_addr;
                break;
        }

        nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
                             struct sk_buff *skb,
                             enum flow_offload_tuple_dir dir,
                             struct ipv6hdr *ip6h)
{
        unsigned int thoff = sizeof(*ip6h);

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
                nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
                nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
        }
}

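/* Build a flow table lookup tuple from an IPv6 packet. Only packets whose
 * next header is directly TCP or UDP (no extension headers) are handled,
 * and packets whose hop limit is about to expire are rejected.
 */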
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
                              struct flow_offload_tuple *tuple, u32 *hdrsize)
{
        struct flow_ports *ports;
        struct ipv6hdr *ip6h;
        unsigned int thoff;

        if (!pskb_may_pull(skb, sizeof(*ip6h)))
                return -1;

        ip6h = ipv6_hdr(skb);

        switch (ip6h->nexthdr) {
        case IPPROTO_TCP:
                *hdrsize = sizeof(struct tcphdr);
                break;
        case IPPROTO_UDP:
                *hdrsize = sizeof(struct udphdr);
                break;
        default:
                return -1;
        }

        if (ip6h->hop_limit <= 1)
                return -1;

        thoff = sizeof(*ip6h);
        if (!pskb_may_pull(skb, thoff + *hdrsize))
                return -1;

        ip6h = ipv6_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

        tuple->src_v6 = ip6h->saddr;
        tuple->dst_v6 = ip6h->daddr;
        tuple->src_port = ports->source;
        tuple->dst_port = ports->dest;
        tuple->l3proto = AF_INET6;
        tuple->l4proto = ip6h->nexthdr;
        tuple->iifidx = dev->ifindex;

        return 0;
}

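/* IPv6 counterpart of nf_flow_offload_ip_hook(): flow lookup, route
 * validation, NAT, hop limit decrement and transmission via the neighbour
 * layer, the xfrm output path or the direct xmit path.
 */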
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
                          const struct nf_hook_state *state)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct nf_flowtable *flow_table = priv;
        struct flow_offload_tuple tuple = {};
        enum flow_offload_tuple_dir dir;
        const struct in6_addr *nexthop;
        struct flow_offload *flow;
        struct net_device *outdev;
        struct ipv6hdr *ip6h;
        struct rt6_info *rt;
        u32 hdrsize;
        int ret;

        if (skb->protocol != htons(ETH_P_IPV6))
                return NF_ACCEPT;

        if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize) < 0)
                return NF_ACCEPT;

        tuplehash = flow_offload_lookup(flow_table, &tuple);
        if (tuplehash == NULL)
                return NF_ACCEPT;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

        if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
                return NF_ACCEPT;

        if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
                                sizeof(*ip6h)))
                return NF_ACCEPT;

        if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
            tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
                rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
                if (!dst_check(&rt->dst, 0)) {
                        flow_offload_teardown(flow);
                        return NF_ACCEPT;
                }
        }

        if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize))
                return NF_DROP;

        flow_offload_refresh(flow_table, flow);

        ip6h = ipv6_hdr(skb);
        nf_flow_nat_ipv6(flow, skb, dir, ip6h);

        ip6h->hop_limit--;
        skb->tstamp = 0;

        if (flow_table->flags & NF_FLOWTABLE_COUNTER)
                nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

        if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
                memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
                IP6CB(skb)->iif = skb->dev->ifindex;
                IP6CB(skb)->flags = IP6SKB_FORWARDED;
                return nf_flow_xmit_xfrm(skb, state, &rt->dst);
        }

        switch (tuplehash->tuple.xmit_type) {
        case FLOW_OFFLOAD_XMIT_NEIGH:
                outdev = rt->dst.dev;
                skb->dev = outdev;
                nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
                skb_dst_set_noref(skb, &rt->dst);
                neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
                ret = NF_STOLEN;
                break;
        case FLOW_OFFLOAD_XMIT_DIRECT:
                ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
                if (ret == NF_DROP)
                        flow_offload_teardown(flow);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);