/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>

#include <net/checksum.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
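
/*
 *	Pick the Identification value for a new Fragment header from a
 *	single global counter.  The counter is protected by a spinlock and
 *	skips zero, so an all-zero fragment ID is never put on the wire.
 */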
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}
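
/*
 *	Final link-layer step of output: prepend the cached hardware header
 *	if the route has one, otherwise let the neighbour entry resolve and
 *	build it.  With neither available the packet is counted as
 *	OUTNOROUTES and dropped.
 */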
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
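
/*
 *	Device output stage: for multicast destinations, loop a copy back to
 *	local listeners via ip6_dev_loopback_xmit() when mc_loop is set and
 *	the device has joined the group, then hand the original packet to
 *	the NF_IP6_POST_ROUTING hook with ip6_output_finish() as okfn.
 */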
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev, ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}
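
/*
 *	dst_output() entry point: fragment packets that exceed the path MTU
 *	(unless UFO will segment them in hardware) or whose route requires
 *	fragmentation on every packet, otherwise transmit them directly.
 */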
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->ufo_size) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}
/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL) {
				IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
				return -ENOBUFS;
			}
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok) {
		IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine.  It is code duplication, but we really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
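
/*
 *	Deliver a clone of a forwarded packet carrying a Router Alert option
 *	to every raw socket that registered for this alert value (via the
 *	IPV6_ROUTER_ALERT socket option).  Returns 1 if the packet was
 *	consumed by a listener, 0 otherwise.
 */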
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
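
/*
 *	Forwarding path: validate policy, hand Router Alert packets to
 *	interested raw sockets, enforce the hop limit and path MTU, possibly
 *	send a redirect, then decrement hop_limit and pass the packet to the
 *	NF_IP6_FORWARD hook.
 */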
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We do NOT do any processing on RA packets; we push them to
	 *	user level AS IS, without any WARRANTY that the application
	 *	will be able to interpret them.  The reason is that we
	 *	cannot do anything clever here.
	 *
	 *	We are not an end-node, so if the packet contains
	 *	AH/ESP we cannot do anything.
	 *	Defragmentation would also be a mistake: RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
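
/*
 *	Copy per-packet metadata (priority, routing, traffic control and
 *	netfilter state) from the original skb to a freshly built fragment.
 */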
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}
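
/*
 *	Walk the extension header chain to find where a Fragment header must
 *	be inserted: after any Hop-by-Hop, Routing and Destination headers
 *	that precede the fragmentable part.  Returns the byte offset and
 *	points *nexthdr at the field that will link to the Fragment header.
 */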
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;

	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
		case NEXTHDR_ROUTING:
		case NEXTHDR_DEST:
			if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1;
			if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset;
			offset += ipv6_optlen(exthdr);
			*nexthdr = &exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
			break;
		default:
			return offset;
		}
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);
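
/*
 *	Fragment an oversized packet.  The fast path reuses an existing
 *	frag_list (each element becomes one fragment); otherwise the slow
 *	path allocates a new skb per fragment and copies the payload in
 *	eight-byte aligned blocks.  Each fragment is pushed to 'output'.
 */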
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
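
/*
 *	ip6_dst_lookup - find or create a route for a flow.
 *	Reuse the socket's cached dst when it is still valid for this
 *	destination and outgoing interface; otherwise do a fresh routing
 *	lookup and, if needed, select a source address for the flow.
 */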
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	int err = 0;

	*dst = NULL;
	if (sk) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		*dst = sk_dst_check(sk, np->dst_cookie);
		if (*dst) {
			struct rt6_info *rt = (struct rt6_info *)*dst;

			/* Yes, checking route validity in the not-connected
			 * case is not very simple.  Take into account
			 * that we do not support routing by source, TOS,
			 * and MSG_DONTROUTE		--ANK (980726)
			 *
			 * 1. If the route was a host route, check that the
			 *    cached destination is current.
			 *    If it is a network route, we still may
			 *    check its validity using the saved pointer
			 *    to the last used address: daddr_cache.
			 *    We do not want to save the whole address now,
			 *    (because the main consumer of this service
			 *    is tcp, which does not have this problem),
			 *    so the last trick works only on connected
			 *    sockets.
			 * 2. oif also should be the same.
			 */
			if (((rt->rt6i_dst.plen != 128 ||
			      !ipv6_addr_equal(&fl->fl6_dst,
					       &rt->rt6i_dst.addr))
			     && (np->daddr_cache == NULL ||
				 !ipv6_addr_equal(&fl->fl6_dst,
						  np->daddr_cache)))
			    || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
				dst_release(*dst);
				*dst = NULL;
			}
		}
	}

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);

		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
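
/*
 *	UFO path for ip6_append_data(): build (or extend) a single large skb
 *	whose payload lives in page fragments, and record the fragment size
 *	and IPv6 fragment ID so the device can segment it on transmit.
 */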
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_HW;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen) -
					    sizeof(struct frag_hdr);
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
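
/*
 *	ip6_append_data - append user data to packets pending on a corked
 *	socket.  The first call sets up the cork state (options, route, MTU);
 *	data is then packed into MTU-sized skbs, or into page fragments when
 *	the device supports scatter-gather, until ip6_push_pending_frames()
 *	finally builds the IPv6 header and transmits the queue.
 */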
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa */
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
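
/*
 *	Build the IPv6 header for the queued data, chain the pending skbs
 *	into one packet via frag_list, send it through NF_IP6_LOCAL_OUT and
 *	release the cork state.
 */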
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *) skb_push(skb, sizeof(struct ipv6hdr));

	*(u32 *)hdr = fl->fl6_flowlabel |
		      htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
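
/*
 *	Drop any data still pending on the socket's write queue and release
 *	the cork state without transmitting.
 */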
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}