/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

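/*
 * Locally generated output: __ip_local_out() finalizes the IP header
 * (total length and checksum) and runs the NF_INET_LOCAL_OUT netfilter
 * hook. nf_hook() returning 1 means the packet was accepted by the hook,
 * in which case ip_local_out() continues delivery via dst_output().
 */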
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->dst, sk);

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

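/*
 * ip_finish_output2() is the last step of the output path: the packet
 * already carries a complete IP header and has passed POST_ROUTING, so
 * what remains is to resolve the neighbour attached to the route and
 * hand the skb to the link layer, either through the cached hardware
 * header (hh) or through neigh->output().
 */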
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	neigh = dst->neighbour;
	if (neigh) {
		struct hh_cache *hh = &neigh->hh;
		if (hh->hh_len)
			return neigh_hh_output(hh, skb);
		else
			return neigh->output(skb);
	}
	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

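/*
 * ip_queue_xmit() is the transmit routine used by connection-oriented
 * sockets (TCP, and SCTP for its already-routed packets): it reuses the
 * route cached on the socket when possible, builds the IP header from
 * the socket and flow information, and hands the packet to
 * ip_local_out().
 */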
int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = fl4->saddr;
	iph->daddr    = fl4->daddr;
	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

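/*
 * The 'output' callback is invoked for every fragment generated below;
 * the normal output path passes ip_finish_output2 here (see
 * ip_finish_output() above), while other callers may supply their own
 * continuation.
 */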
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

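/*
 * __ip_append_data() is the workhorse behind ip_append_data() and
 * ip_make_skb(): it appends caller data to the queue of pending skbs
 * tracked by 'cork', sizing each skb so that it can later be turned
 * into an IP fragment no larger than the path MTU. Data is pulled in
 * through the caller-supplied getfrag() callback, and page fragments
 * are used when the device supports NETIF_F_SG.
 */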
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 mtu, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					cork->tx_flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			skb_shinfo(skb)->tx_flags = cork->tx_flags;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = cork->page;
			int off = cork->off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL)  {
					err = -ENOMEM;
					goto error;
				}
				cork->page = page;
				cork->off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			cork->off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (cork->opt == NULL) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(cork->opt == NULL))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->tx_flags = ipc->tx_flags;
	cork->page = NULL;
	cork->off = 0;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
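/*
 * Typical usage by a datagram protocol such as UDP (sketch only, the
 * variable names below are illustrative):
 *
 *	err = ip_append_data(sk, fl4, getfrag, msg, len, transhdrlen,
 *			     &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corked)
 *		err = ip_push_pending_frames(sk, fl4);
 */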
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
				from, length, transhdrlen, flags);
}

ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (cork->length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	cork->length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	if ((skb = __skb_dequeue(queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented. No matter what
	 * transforms do to the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

int ip_send_skb(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);
	int err;

	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

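/*
 * ip_make_skb() is the uncorked, single-shot variant: it runs the cork
 * setup, data append and skb collapse on a private queue and cork, and
 * returns the finished skb (or an ERR_PTR) without touching
 * sk->sk_write_queue.
 */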
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
		   struct ip_reply_arg *arg, unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt.opt, skb))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
			   RT_TOS(ip_hdr(skb)->tos),
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, rt->rt_spec_dst,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt))
		return;

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}