/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

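/*
 * Fragment IDs come from a single global counter serialized by
 * ip6_id_lock; the counter skips 0, which ip6_fragment's slow path
 * uses to mean "no ID selected yet".
 */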
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

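/*
 * Final output step: use the cached hardware header if the dst has
 * one, otherwise hand the packet to the neighbour's output function
 * for link-layer resolution.
 */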
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

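/*
 * Post-routing output. For multicast destinations a clone is looped
 * back via ip6_dev_loopback_xmit() when local listeners exist and the
 * sending socket has not cleared mc_loop; packets whose hop limit is
 * already 0 are discarded instead of hitting the wire.
 */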
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev, ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

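/*
 * Entry point from dst_output(): fragment when the packet exceeds the
 * path MTU and is not a GSO packet, or when the route requires
 * fragmentation on every packet (dst_allfrag).
 */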
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

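	/* First 4 bytes of the header: version 6, the traffic class
	 * chosen above, and the flow label, written as one word.
	 */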
	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

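/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * registered for this alert value; earlier matches receive clones,
 * the last match consumes the original skb. Returns 1 if the packet
 * was delivered.
 */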
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

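/*
 * For a destination we answer proxy NDISC for: return 1 if the packet
 * is a unicast neighbour discovery message that must go to the local
 * input path, -1 if it cannot be forwarded at all (link-local
 * destination), 0 otherwise.
 */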
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr) & (IPV6_ADDR_MULTICAST |
						  IPV6_ADDR_LOOPBACK |
						  IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr *)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);

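/*
 * Fragment an outgoing packet. If the skb already carries a frag_list
 * whose geometry fits the MTU (8-byte aligned chunks with headroom for
 * the headers), the existing chain is reused in place; otherwise the
 * slow path below copies the payload into freshly allocated fragments.
 */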
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

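/*
 * Slow path: allocate a new skb for every fragment, copy the
 * unfragmentable headers and a chunk of payload into it, and send the
 * fragments one at a time.
 */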
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr *)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

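/*
 * Nonzero means the cached route cannot be reused for fl_addr: it is
 * neither an exact host route for that address nor validated by the
 * socket's cached address (daddr_cache/saddr_cache).
 */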
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

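/*
 * UFO path for ip6_append_data(): queue the complete UDP datagram as
 * one oversized skb and let a NETIF_F_UFO device segment it; the
 * fragment ID is chosen now and stashed in skb_shinfo(skb)->ip6_frag_id.
 */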
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}

int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
		    int offset, int len, int odd, struct sk_buff *skb),
		    void *from, int length, int transhdrlen,
		    int hlimit, int tclass, struct ipv6_txoptions *opt,
		    struct flowi *fl, struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags & MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

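	/* maxfraglen caps each queued fragment: the space left under
	 * the MTU, rounded down to an 8-byte multiple as fragmentation
	 * requires, minus the fragment header added at transmit time.
	 */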
	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu - exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features & NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr));

			/*
			 * Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features & NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page) +
				    frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

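/*
 * Splice all skbs queued on the socket into one packet (the first skb
 * keeps the rest on its frag_list), push the extension headers and the
 * IPv6 header, and hand the result to netfilter/dst_output().
 */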
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));

	*(__be32 *)hdr = fl->fl6_flowlabel |
			 htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}