// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

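/* Tear down the flow on TCP FIN or RST, so that this packet and the rest
 * of the connection teardown fall back to the classic forwarding path.
 */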
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

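/* Fix up the layer 4 checksum after an address rewrite. UDP checksums are
 * optional over IPv4 (zero means "no checksum"), so only update them when
 * one is present or when the checksum is still pending in CHECKSUM_PARTIAL
 * state.
 */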
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

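/* Rewrite the source (SNAT) or destination (DNAT) address with the address
 * recorded in the tuple of the opposite direction, then fix up the IP
 * header checksum and the layer 4 checksum.
 */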
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

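/* Record up to two levels of encapsulation (VLAN tags and/or a PPPoE
 * session) in the tuple, so that lookups only match flows set up for the
 * same encapsulation.
 */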
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

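/* Extract the lookup tuple from an IPv4 packet. Fragments, packets with IP
 * options and protocols other than TCP/UDP are not handled here, and
 * packets whose TTL is about to expire are left to the classic path so
 * that the ICMP error is generated properly.
 */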
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	switch (iph->protocol) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

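/* Hand the packet over to the dst output path, which performs the xfrm
 * transformations; the flowtable itself does not do IPsec processing.
 */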
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

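/* Map the PPP protocol field that follows the PPPoE session header to the
 * corresponding Ethernet protocol. Only IPv4 and IPv6 payloads are
 * supported.
 */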
static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
{
	__be16 proto;

	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			     sizeof(struct pppoe_hdr)));
	switch (proto) {
	case htons(PPP_IP):
		return htons(ETH_P_IP);
	case htons(PPP_IPV6):
		return htons(ETH_P_IPV6);
	}

	return 0;
}

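/* Check whether the packet carries the given protocol behind a VLAN or
 * PPPoE session header and, if so, report the extra header length through
 * @offset.
 */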
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

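/* Strip the encapsulation headers recorded in the tuple: clear a
 * hardware-accelerated VLAN tag, or pull VLAN/PPPoE headers off the packet
 * and reset the network header accordingly.
 */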
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

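/* Transmit directly through the cached output device, building the
 * Ethernet header from the destination and source addresses stored in the
 * tuple.
 */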
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

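/* IPv4 fast path: look up the flow table from the netfilter ingress hook
 * and, if the packet belongs to an offloaded flow, apply NAT, decrement
 * the TTL and forward it straight to the output device, bypassing the
 * classic forwarding path.
 */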
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		if (!dst_check(&rt->dst, 0)) {
			flow_offload_teardown(flow);
			return NF_ACCEPT;
		}
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

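/* IPv6 counterparts of the NAT helpers above. There is no IPv6 header
 * checksum, so only the layer 4 checksum needs fixing up.
 */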
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

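/* Extract the lookup tuple from an IPv6 packet. Extension headers are not
 * parsed: anything other than plain TCP/UDP in the next header field falls
 * back to the classic path.
 */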
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
	default:
		return -1;
	}

	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

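/* IPv6 fast path, mirroring nf_flow_offload_ip_hook() above: apply NAT,
 * decrement the hop limit and transmit via the cached route or directly
 * through the output device.
 */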
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		if (!dst_check(&rt->dst, 0)) {
			flow_offload_teardown(flow);
			return NF_ACCEPT;
		}
	}

	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);