/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
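/* __ip_local_out() finalises the IPv4 header (total length and checksum)
 * and runs the NF_INET_LOCAL_OUT netfilter hook; a return value of 1 means
 * the hook accepted the packet and transmission should continue.
 * ip_local_out() below does exactly that by handing the skb to dst_output().
 */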
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}
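/* inet->uc_ttl is -1 unless the application overrode it with the IP_TTL
 * socket option, so the common case falls through to the per-route default
 * from ip4_dst_hoplimit() (typically the net.ipv4.ip_default_ttl sysctl).
 */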
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = inet->tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen >> 2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
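/* ip_finish_output2() is the last IP-level step on the output path: it
 * makes sure there is enough headroom for the link-layer header, then
 * resolves the next hop to a neighbour (ARP) entry and hands the frame
 * to it for transmission.
 */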
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res = dst_neigh_output(dst, neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}
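/* A GSO skb carries a gso_size chosen by the sender.  As long as each
 * resulting segment fits the egress MTU (skb_gso_validate_mtu()) the skb
 * can go straight to ip_finish_output2(); otherwise it is software-segmented
 * below and every segment is IP-fragmented individually.
 */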
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_mtu(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}
static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return dev_loopback_xmit(net, sk, skb);
}
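/* ip_mc_output() handles locally generated multicast and broadcast: besides
 * sending the frame out of the device, a clone is looped back through
 * dev_loopback_xmit() so that local group members on this host receive
 * their copy, unless the sender disabled that with IP_MULTICAST_LOOP.
 */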
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *	iph->saddr = fl4->saddr;
 *	iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
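/* The BUILD_BUG_ON above guarantees that fl4->daddr immediately follows
 * fl4->saddr, so the single 8-byte memcpy() fills both iph->saddr and
 * iph->daddr (likewise adjacent in struct iphdr), letting the compiler
 * emit one 64-bit store.
 */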
/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
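	/* The 16-bit store above fills the first two header bytes in one go:
	 * version 4 in the top nibble, header length 5 (in 32-bit words) in
	 * the next nibble, then the TOS byte.
	 */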
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}
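/* A datagram with DF set must never be fragmented in flight: if it does not
 * fit (and the sender did not opt out via skb->ignore_df), we answer with
 * ICMP "fragmentation needed" carrying the MTU, which is what drives path
 * MTU discovery at the sender.
 */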
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
			iph->frag_off |= htons(IP_DF);

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF set on each fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);
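/* Fragment offsets are carried in units of 8 bytes, hence the offset >> 3
 * and the eight-byte alignment of every non-final fragment above.  For
 * example, a 4000-byte payload over a 1500-byte MTU (20-byte header) splits
 * into fragments carrying 1480, 1480 and 1040 bytes at offsets 0, 185 and
 * 370 (byte offsets 0, 1480 and 2960).
 */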
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
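/* getfrag() callbacks copy 'len' bytes of source data to 'to' and, when the
 * device will not checksum in hardware (ip_summed != CHECKSUM_PARTIAL), fold
 * a software checksum into skb->csum while copying; 'odd' tells
 * csum_block_add() whether the block starts at an odd byte offset so the
 * 16-bit ones'-complement sum stays aligned.
 */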
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* specify the length of each IP datagram fragment */
	skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
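/* With UDP fragmentation offload (NETIF_F_UFO) the stack builds one
 * oversized UDP datagram and records the per-fragment payload in gso_size;
 * the device (or the software GSO layer as a fallback) later splits it
 * into wire-sized IP fragments.
 */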
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;
	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
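	/* Example: mtu 1500 with no IP options gives fragheaderlen 20 and
	 * maxfraglen (1480 & ~7) + 20 = 1500; with 12 bytes of options,
	 * fragheaderlen is 32 and maxfraglen (1468 & ~7) + 32 = 1496,
	 * keeping every fragment's payload a multiple of 8 bytes.
	 */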
	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    !(flags & MSG_MORE) &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if ((skb && skb_is_gso(skb)) ||
	    (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
	    (skb_queue_len(queue) <= 1) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of these segments is an IP fragment ready for sending to
	 * the network after adding an appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}
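/* The cork records everything needed to build the final header when
 * ip_push_pending_frames() eventually runs: the (stolen) route, a private
 * copy of the IP options and a fragment size frozen at the MTU in effect
 * when corking started.
 */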
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	if ((size + skb->len > mtu) &&
	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return -EOPNOTSUPP;

		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}
	cork->length += size;

	while (size > 0) {
		if (skb_is_gso(skb)) {
			len = size;
		} else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}

	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}
/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter what transforms
	 * do to the packet's size, it will still come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}
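/* Note that the pending skbs are not linearised: the first skb takes the
 * rest onto its frag_list, and the frag-list fast path in ip_do_fragment()
 * can later walk that list to emit ready-made fragments without copying.
 */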
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}
/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}
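/* ip_make_skb() is the uncorked variant used e.g. by UDP when there is no
 * pending data: it runs the append machinery over a private queue and an
 * on-stack cork, so no socket-wide state is touched and no separate push
 * call is needed.
 */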
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark),
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}
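/* TCP calls this through per-cpu kernel control sockets: RSTs and ACKs for
 * connections without a full socket (e.g. in TIME_WAIT) are built and
 * routed purely from fields of the packet being answered.
 */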
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}