// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
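
/* Illustrative note (editor's sketch, not from the original file):
 * ip_fast_csum() folds the 16-bit one's-complement sum over iph->ihl
 * 32-bit words.  Zeroing iph->check before summing is what makes a
 * receiver's checksum over the whole header, including the stored
 * check value, come out to 0xffff for an undamaged header.
 */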
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
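
/* Note: nf_hook() returns 1 when the NF_INET_LOCAL_OUT hook accepts the
 * packet without stealing or dropping it, which is why ip_local_out()
 * treats a return value of exactly 1 from __ip_local_out() as
 * "continue with dst_output()".
 */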
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
			  u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	/* Do not bother generating IPID for small packets (eg SYNACK) */
	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		/* TCP packets here are SYNACK with fat IPv4/TCP options.
		 * Avoid using the hashed IP ident generator.
		 */
		if (sk->sk_protocol == IPPROTO_TCP)
			iph->id = (__force __be16)prandom_u32();
		else
			__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb)
			return -ENOMEM;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath -  GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}
static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}
static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
EXPORT_SYMBOL(ip_output);
/*
 * copy saddr and daddr, possibly using 64bit load/stores
 *
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}
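
/* The BUILD_BUG_ON() above pins the struct flowi4 layout: daddr must sit
 * immediately after saddr.  With adjacency guaranteed, the compiler is
 * free to merge the two 32-bit assignments into a single 64-bit store,
 * which is the optimization the comment above alludes to.
 */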
/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
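	/* Worked example of the line above: (4 << 12) | (5 << 8) packs
	 * version 4 and a 5-word (20 byte) header length into 0x4500, so
	 * with e.g. tos == 0x10 the first two header bytes become 0x4510.
	 */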
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}
void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);
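
/* Note: iph->frag_off carries the fragment offset in 8-byte units, hence
 * the "iter->offset >> 3" above.  A fragment whose payload starts at byte
 * 1480 of the original datagram, for example, is encoded as 1480 / 8 = 185.
 */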
void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);
static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}
struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left) {
		len &= ~7;
	}

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC : If we are fragmenting a fragment that's not the
	 *		   last fragment then keep MF on each bit
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				bool first_frag = (iter.offset == 0);

				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
				if (first_frag && IPCB(skb)->opt.optlen) {
					/* ipcb->opt is not populated for frags
					 * coming from __ip_make_skb(),
					 * ip_options_fragment() needs optlen
					 */
					IPCB(iter.frag)->opt.optlen =
						IPCB(skb)->opt.optlen;
					ip_options_fragment(iter.frag);
					ip_send_check(iter.iph);
				}
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (!err) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
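
/* Usage sketch (illustrative, not part of the original file): datagram
 * protocols pass ip_generic_getfrag() as the getfrag callback together
 * with their struct msghdr, roughly
 *
 *	ip_append_data(sk, fl4, ip_generic_getfrag, msg,
 *		       len, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags);
 *
 * so the user-to-kernel copy and the checksum happen in one pass over
 * the iov.
 */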
static __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
= ip_sk_ignore_df(sk
) ? IP_MAX_MTU
: mtu
;
1003 if (cork
->length
+ length
> maxnonfragsize
- fragheaderlen
) {
1004 ip_local_error(sk
, EMSGSIZE
, fl4
->daddr
, inet
->inet_dport
,
1005 mtu
- (opt
? opt
->optlen
: 0));
1010 * transhdrlen > 0 means that this is the first fragment and we wish
1011 * it won't be fragmented in the future.
1014 length
+ fragheaderlen
<= mtu
&&
1015 rt
->dst
.dev
->features
& (NETIF_F_HW_CSUM
| NETIF_F_IP_CSUM
) &&
1016 (!(flags
& MSG_MORE
) || cork
->gso_size
) &&
1017 (!exthdrlen
|| (rt
->dst
.dev
->features
& NETIF_F_HW_ESP_TX_CSUM
)))
1018 csummode
= CHECKSUM_PARTIAL
;
1020 if (flags
& MSG_ZEROCOPY
&& length
&& sock_flag(sk
, SOCK_ZEROCOPY
)) {
1021 uarg
= msg_zerocopy_realloc(sk
, length
, skb_zcopy(skb
));
1024 extra_uref
= !skb_zcopy(skb
); /* only ref on new uarg */
1025 if (rt
->dst
.dev
->features
& NETIF_F_SG
&&
1026 csummode
== CHECKSUM_PARTIAL
) {
1030 skb_zcopy_set(skb
, uarg
, &extra_uref
);
1034 cork
->length
+= length
;
1036 /* So, what's going on in the loop below?
1038 * We use calculated fragment length to generate chained skb,
1039 * each of segments is IP fragment ready for sending to network after
1040 * adding appropriate IP header.
1046 while (length
> 0) {
1047 /* Check if the remaining data fits into current packet. */
1048 copy
= mtu
- skb
->len
;
1050 copy
= maxfraglen
- skb
->len
;
1053 unsigned int datalen
;
1054 unsigned int fraglen
;
1055 unsigned int fraggap
;
1056 unsigned int alloclen
, alloc_extra
;
1057 unsigned int pagedlen
;
1058 struct sk_buff
*skb_prev
;
1062 fraggap
= skb_prev
->len
- maxfraglen
;
1067 * If remaining data exceeds the mtu,
1068 * we know we need more fragment(s).
1070 datalen
= length
+ fraggap
;
1071 if (datalen
> mtu
- fragheaderlen
)
1072 datalen
= maxfraglen
- fragheaderlen
;
1073 fraglen
= datalen
+ fragheaderlen
;
1076 alloc_extra
= hh_len
+ 15;
1077 alloc_extra
+= exthdrlen
;
1079 /* The last fragment gets additional space at tail.
1080 * Note, with MSG_MORE we overallocate on fragments,
1081 * because we have no idea what fragment will be
1084 if (datalen
== length
+ fraggap
)
1085 alloc_extra
+= rt
->dst
.trailer_len
;
1087 if ((flags
& MSG_MORE
) &&
1088 !(rt
->dst
.dev
->features
&NETIF_F_SG
))
1091 (fraglen
+ alloc_extra
< SKB_MAX_ALLOC
||
1092 !(rt
->dst
.dev
->features
& NETIF_F_SG
)))
1095 alloclen
= min_t(int, fraglen
, MAX_HEADER
);
1096 pagedlen
= fraglen
- alloclen
;
1099 alloclen
+= alloc_extra
;
1102 skb
= sock_alloc_send_skb(sk
, alloclen
,
1103 (flags
& MSG_DONTWAIT
), &err
);
1106 if (refcount_read(&sk
->sk_wmem_alloc
) + wmem_alloc_delta
<=
1108 skb
= alloc_skb(alloclen
,
1117 * Fill in the control structures
1119 skb
->ip_summed
= csummode
;
1121 skb_reserve(skb
, hh_len
);
1124 * Find where to start putting bytes.
1126 data
= skb_put(skb
, fraglen
+ exthdrlen
- pagedlen
);
1127 skb_set_network_header(skb
, exthdrlen
);
1128 skb
->transport_header
= (skb
->network_header
+
1130 data
+= fragheaderlen
+ exthdrlen
;
1133 skb
->csum
= skb_copy_and_csum_bits(
1134 skb_prev
, maxfraglen
,
1135 data
+ transhdrlen
, fraggap
);
1136 skb_prev
->csum
= csum_sub(skb_prev
->csum
,
1139 pskb_trim_unique(skb_prev
, maxfraglen
);
1142 copy
= datalen
- transhdrlen
- fraggap
- pagedlen
;
1143 if (copy
> 0 && getfrag(from
, data
+ transhdrlen
, offset
, copy
, fraggap
, skb
) < 0) {
1150 length
-= copy
+ transhdrlen
;
1153 csummode
= CHECKSUM_NONE
;
1155 /* only the initial fragment is time stamped */
1156 skb_shinfo(skb
)->tx_flags
= cork
->tx_flags
;
1158 skb_shinfo(skb
)->tskey
= tskey
;
1160 skb_zcopy_set(skb
, uarg
, &extra_uref
);
1162 if ((flags
& MSG_CONFIRM
) && !skb_prev
)
1163 skb_set_dst_pending_confirm(skb
, 1);
1166 * Put the packet on the pending queue.
1168 if (!skb
->destructor
) {
1169 skb
->destructor
= sock_wfree
;
1171 wmem_alloc_delta
+= skb
->truesize
;
1173 __skb_queue_tail(queue
, skb
);
1180 if (!(rt
->dst
.dev
->features
&NETIF_F_SG
) &&
1181 skb_tailroom(skb
) >= copy
) {
1185 if (getfrag(from
, skb_put(skb
, copy
),
1186 offset
, copy
, off
, skb
) < 0) {
1187 __skb_trim(skb
, off
);
1191 } else if (!uarg
|| !uarg
->zerocopy
) {
1192 int i
= skb_shinfo(skb
)->nr_frags
;
1195 if (!sk_page_frag_refill(sk
, pfrag
))
1198 if (!skb_can_coalesce(skb
, i
, pfrag
->page
,
1201 if (i
== MAX_SKB_FRAGS
)
1204 __skb_fill_page_desc(skb
, i
, pfrag
->page
,
1206 skb_shinfo(skb
)->nr_frags
= ++i
;
1207 get_page(pfrag
->page
);
1209 copy
= min_t(int, copy
, pfrag
->size
- pfrag
->offset
);
1211 page_address(pfrag
->page
) + pfrag
->offset
,
1212 offset
, copy
, skb
->len
, skb
) < 0)
1215 pfrag
->offset
+= copy
;
1216 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], copy
);
1218 skb
->data_len
+= copy
;
1219 skb
->truesize
+= copy
;
1220 wmem_alloc_delta
+= copy
;
1222 err
= skb_zerocopy_iter_dgram(skb
, from
, copy
);
1230 if (wmem_alloc_delta
)
1231 refcount_add(wmem_alloc_delta
, &sk
->sk_wmem_alloc
);
1237 net_zcopy_put_abort(uarg
, extra_uref
);
1238 cork
->length
-= length
;
1239 IP_INC_STATS(sock_net(sk
), IPSTATS_MIB_OUTDISCARDS
);
1240 refcount_add(wmem_alloc_delta
, &sk
->sk_wmem_alloc
);
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);

	if (!inetdev_valid_mtu(cork->fragsize))
		return -ENETUNREACH;

	cork->gso_size = ipc->gso_size;

	cork->dst = &rt->dst;
	/* We stole this route, caller should not release it. */
	*rtp = NULL;

	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
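
/* Usage sketch (illustrative, not from this file): a typical corked
 * sender does roughly
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, getfrag, from, len, transhdrlen,
 *			     &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 *
 * which is the pattern udp_sendmsg() follows.
 */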
ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features & NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}
/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter, what transforms
	 * how transforms change size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = cork->mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
*net
, struct sk_buff
*skb
)
1571 err
= ip_local_out(net
, skb
->sk
, skb
);
1574 err
= net_xmit_errno(err
);
1576 IP_INC_STATS(net
, IPSTATS_MIB_OUTDISCARDS
);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets whole the not fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}
/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	ipc.sockc.mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}