// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

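/* Tear down the flow when a TCP FIN or RST is seen, so that the packet
 * (and the remainder of the connection) falls back to the classic
 * forwarding path, where conntrack can track connection shutdown.
 */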
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

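/* Fix up the TCP checksum after an IPv4 address has been rewritten. */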
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

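/* Fix up the UDP checksum after an IPv4 address has been rewritten.
 * A zero UDP checksum means "no checksum" on IPv4, so only mangle it
 * when it is in use, and map a mangled result of zero to CSUM_MANGLED_0.
 */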
static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

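/* Dispatch the layer 4 checksum fixup for the transport protocol in use. */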
static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

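/* Rewrite the source address: in the original direction, replace it with
 * the reply tuple's destination; in the reply direction, restore the
 * original tuple's source. Both the IP header checksum and the layer 4
 * checksum are updated.
 */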
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

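/* Rewrite the destination address, the mirror image of nf_flow_snat_ip():
 * map the original direction's destination to the reply tuple's source.
 */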
static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

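/* Apply source and/or destination NAT as indicated by the NF_FLOW_SNAT and
 * NF_FLOW_DNAT flow flags, ports first and then addresses. ip_hdr() is
 * reloaded after each step in case the header was reallocated while making
 * the skb writable.
 */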
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
		return -1;

	iph = ip_hdr(skb);
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
		return -1;

	return 0;
}

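/* Any IP header length other than the bare 20 bytes means options are
 * present; such packets are not taken into the fast path.
 */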
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

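/* Extract the lookup tuple from an IPv4 packet. Only unfragmented TCP and
 * UDP packets without IP options and with a TTL large enough to be
 * forwarded are candidates for the fast path.
 */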
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (iph->ttl <= 1)
		return -1;

	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	iph = ip_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

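/* Only xfrm-transformed routes are revalidated here; for all other routes
 * the cached dst is used as-is until the flow is torn down.
 */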
static int nf_flow_offload_dst_check(struct dst_entry *dst)
{
	if (unlikely(dst_xfrm(dst)))
		return dst_check(dst, 0) ? 0 : -1;

	return 0;
}

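/* Hand the packet over to the xfrm output path: drop the socket reference,
 * attach the dst without taking a reference and let dst_output() perform
 * the transformation and transmission. The skb is consumed, hence NF_STOLEN.
 */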
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

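/* IPv4 fast path: look up the packet's tuple in the flow table and, on a
 * hit, refresh the flow timeout, apply NAT, decrement the TTL and transmit
 * directly via neigh_xmit(), bypassing the rest of the forwarding path.
 * Packets that do not match, exceed the path MTU or carry TCP FIN/RST are
 * handed back to the classic path with NF_ACCEPT.
 */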
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	flow_offload_refresh(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

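/* The IPv6 half below mirrors the IPv4 helpers above. Checksum fixups use
 * inet_proto_csum_replace16() for the 128-bit addresses, and no IP-level
 * checksum update is needed since IPv6 has no header checksum.
 */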
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}

static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
		return -1;

	ip6h = ipv6_hdr(skb);
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
		return -1;

	return 0;
}

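/* Extract the lookup tuple from an IPv6 packet. Extension headers are not
 * parsed: only packets whose next header is directly TCP or UDP, with a
 * hop limit large enough to be forwarded, are candidates for the fast path.
 */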
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	if (ip6h->hop_limit <= 1)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ip6h = ipv6_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;

	return 0;
}

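/* IPv6 fast path, the counterpart of nf_flow_offload_ip_hook(): on a flow
 * table hit, refresh the flow, apply NAT, decrement the hop limit and
 * transmit directly via neigh_xmit() or the xfrm output path.
 */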
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	flow_offload_refresh(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);