/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:	allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
#include <net/udp_tunnel.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
		}
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
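
/*
 * Illustration only (not part of the kernel source): the reuseport handling
 * above is what a userspace bind() with SO_REUSEPORT exercises. A hedged,
 * hypothetical sketch of two processes sharing one UDP port:
 *
 *	#include <stdint.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	int make_reuseport_socket(uint16_t port)
 *	{
 *		int one = 1;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		struct sockaddr_in addr = {
 *			.sin_family = AF_INET,
 *			.sin_port = htons(port),
 *			.sin_addr.s_addr = htonl(INADDR_ANY),
 *		};
 *
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *		return fd;
 *	}
 *
 * Both sockets must enable SO_REUSEPORT before bind() and belong to the same
 * UID, matching the sk_reuseport/uid_eq() checks in udp_reuseport_add_sock().
 */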
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	if (sk->sk_rcv_saddr != daddr)
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;

	inet = inet_sk(sk);
	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
					dif, sdif);
	if (!dev_match)
		return -1;
	score += 4;

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			badness = score;
			result = sk;
		}
	}
	return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	bool exact_dif = udp_lib_exact_dif_match(net, skb);

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp4_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  exact_dif, hslot2, skb);
	if (!result) {
		hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];

		result = udp4_lib_lookup2(net, saddr, sport,
					  htonl(INADDR_ANY), hnum, dif, sdif,
					  exact_dif, hslot2, skb);
	}
	if (unlikely(IS_ERR(result)))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}
DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_inc(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);
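
/*
 * Illustration only: udp_encap_enable() is typically reached when a UDP
 * socket is turned into an encapsulation socket. A hedged userspace sketch
 * for IPsec NAT-T, assuming a socket already bound to port 4500 and a libc
 * exposing UDP_ENCAP / UDP_ENCAP_ESPINUDP (otherwise take them from
 * <linux/udp.h>):
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * Kernel-side tunnels (L2TP, VXLAN, GENEVE, ...) typically install their
 * encap_rcv handler through setup_udp_tunnel_sock() rather than this
 * socket option.
 */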
/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, u32 info);
		const struct ip_tunnel_encap_ops *encap;

		encap = rcu_dereference(iptun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, info))
			return 0;
	}

	return -ENOENT;
}
/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp4_lib_err_encap(struct net *net,
					 const struct iphdr *iph,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb, u32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv4 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, iph->ihl << 2);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
			       iph->saddr, uh->dest, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk)
		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       inet_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udp_encap_needed_key)) {
			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
						  info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (tunnel) {
		/* ...not for tunnels though: we don't have a sending socket */
		goto out;
	}
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

int udp_err(struct sk_buff *skb, u32 info)
{
	return __udp4_lib_err(skb, info, &udp_table);
}
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
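
/*
 * Illustration only: the csum_tcpudp_magic() seeding above corresponds to
 * the RFC 768 pseudo-header. A hedged, standalone sketch of the same
 * computation in plain C (not a kernel helper; saddr/daddr and len are in
 * host byte order, the result must be stored with htons()):
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint16_t udp_checksum(uint32_t saddr, uint32_t daddr,
 *				     const uint8_t *udp, size_t len)
 *	{
 *		uint32_t sum = 0;
 *		size_t i;
 *
 *		sum += (saddr >> 16) + (saddr & 0xffff);	pseudo header
 *		sum += (daddr >> 16) + (daddr & 0xffff);
 *		sum += 17;					IPPROTO_UDP
 *		sum += len;					UDP length
 *
 *		for (i = 0; i + 1 < len; i += 2)		header + data
 *			sum += (udp[i] << 8) | udp[i + 1];
 *		if (len & 1)					odd trailing byte
 *			sum += udp[len - 1] << 8;
 *
 *		while (sum >> 16)				fold carries
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;				one's complement
 *	}
 *
 * As in the kernel code, an all-zero result must be transmitted as 0xffff
 * (CSUM_MANGLED_0), since zero means "no checksum" for UDP over IPv4.
 */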
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
		goto csum_partial;
	}

	if (is_udplite)					 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
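
/*
 * Illustration only: the corking path above is driven from userspace via
 * the UDP_CORK socket option (or MSG_MORE). A hedged sketch that builds one
 * datagram from two writes, assuming a connected UDP socket fd:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, "hello ", 6, 0);
 *	send(fd, "world", 5, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * Clearing UDP_CORK ends up in udp_push_pending_frames(), which emits the
 * two writes as a single 11-byte UDP payload.
 */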
static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);
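
/*
 * Illustration only: the UDP_SEGMENT cmsg parsed above enables UDP GSO from
 * userspace. A hedged sketch that sends one large buffer which the stack
 * splits into 1200-byte datagrams (assumes a connected socket, Linux >= 4.18,
 * and UDP_SEGMENT/SOL_UDP available from the libc headers or <linux/udp.h>):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/udp.h>
 *	#include <sys/socket.h>
 *
 *	char buf[12000];
 *	uint16_t gso_size = 1200;
 *	char ctrl[CMSG_SPACE(sizeof(gso_size))] = { 0 };
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_UDP;
 *	cm->cmsg_type = UDP_SEGMENT;
 *	cm->cmsg_len = CMSG_LEN(sizeof(gso_size));
 *	memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));
 *	sendmsg(fd, &msg, 0);
 *
 * The same segment size can be set once for all sends with
 * setsockopt(fd, SOL_UDP, UDP_SEGMENT, ...), which populates up->gso_size.
 */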
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipcm_init_sk(&ipc, inet);
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * and uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
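
/*
 * Illustration only: the usin/msg_name handling at the top of udp_sendmsg()
 * is what a plain userspace sendto() on an unconnected socket reaches. A
 * hedged sketch (query/query_len stand for an application buffer):
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(53),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	sendto(fd, query, query_len, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * With no destination address and no prior connect(), the code above returns
 * -EDESTADDRREQ; on a connected socket the address may be omitted and the
 * cached route is reused (the "connected" fast path).
 */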
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("cork failed\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	/* all head states except sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);


	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}
/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
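
/*
 * Illustration only: the sk_peek_off handling above is the kernel side of
 * SO_PEEK_OFF. A hedged userspace sketch that peeks into a datagram in two
 * steps without dequeueing it (hdr/body/full stand for application buffers):
 *
 *	#include <sys/socket.h>
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, hdr, sizeof(hdr), MSG_PEEK);	 peek offset advances
 *	recv(fd, body, sizeof(body), MSG_PEEK);	 continues where hdr ended
 *	recv(fd, full, sizeof(full), 0);	 real read consumes the datagram
 *
 * A normal (non-MSG_PEEK) receive consumes the datagram and, as seen in
 * skb_consume_udp(), moves the peek offset back by the consumed length.
 */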
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
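
/*
 * Illustration only: SIOCINQ/SIOCOUTQ above are reachable from userspace via
 * ioctl(). A hedged sketch querying how many bytes the next datagram carries
 * and how much send-side data is still outstanding:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int pending_rx = 0, pending_tx = 0;
 *
 *	ioctl(fd, SIOCINQ, &pending_rx);	 length of first queued datagram
 *	ioctl(fd, SIOCOUTQ, &pending_tx);	 bytes not yet sent/freed
 *
 * Note that for UDP, SIOCINQ (aka FIONREAD) reports the size of the first
 * datagram via first_packet_length(), not the total of all queued data.
 */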
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							off, err, &last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							off, err, &last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_udp);
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeking)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
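
/*
 * Illustration only: the MSG_TRUNC handling above lets userspace learn the
 * real datagram size even when its buffer is too small. A hedged sketch:
 *
 *	#include <sys/socket.h>
 *
 *	char buf[512];
 *	ssize_t n;
 *
 *	n = recv(fd, buf, sizeof(buf), MSG_TRUNC);
 *	if (n > (ssize_t)sizeof(buf)) {
 *		n is the full datagram length (the "err = ulen" branch);
 *		only the first 512 bytes were copied, the rest was dropped.
 *	}
 *
 * Passing MSG_PEEK instead keeps the datagram queued, which is why the code
 * above charges drops and statistics only on the non-peeking path.
 */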
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);
int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
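
/*
 * Illustration only: __udp_disconnect() is what userspace reaches by
 * "connecting" a UDP socket to AF_UNSPEC, the POSIX way to break a UDP
 * association. A hedged sketch:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	struct sockaddr_in unspec;
 *
 *	memset(&unspec, 0, sizeof(unspec));
 *	unspec.sin_family = AF_UNSPEC;
 *	connect(fd, (struct sockaddr *)&unspec, sizeof(unspec));
 *
 * Afterwards the socket can sendto()/recvfrom() arbitrary peers again; the
 * local port is kept only if it was explicitly bound (SOCK_BINDPORT_LOCK),
 * otherwise the socket is unhashed as seen above.
 */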
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udp_queue_rcv_one_skb(sk, skb);

	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, true);
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		__skb_pull(skb, skb_transport_offset(skb));
		ret = udp_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
	}
	return 0;
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
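
/*
 * Illustration only: the per-listener delivery above is what a userspace
 * multicast receiver sees. A hedged sketch of joining 239.1.1.1:5000 with
 * the default interface selection (INADDR_ANY):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	struct ip_mreq mreq = { 0 };
 *	struct sockaddr_in local = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(5000),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *	inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);
 *	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * Every socket that joined the group and matches __udp_is_mcast_sock() gets
 * its own clone of the datagram (the skb_clone() above).
 */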
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * fast path failure is ignored
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}
2254 int __udp4_lib_rcv(struct sk_buff
*skb
, struct udp_table
*udptable
,
2259 unsigned short ulen
;
2260 struct rtable
*rt
= skb_rtable(skb
);
2261 __be32 saddr
, daddr
;
2262 struct net
*net
= dev_net(skb
->dev
);
2265 * Validate the packet.
2267 if (!pskb_may_pull(skb
, sizeof(struct udphdr
)))
2268 goto drop
; /* No space for header. */
2271 ulen
= ntohs(uh
->len
);
2272 saddr
= ip_hdr(skb
)->saddr
;
2273 daddr
= ip_hdr(skb
)->daddr
;
2275 if (ulen
> skb
->len
)
2278 if (proto
== IPPROTO_UDP
) {
2279 /* UDP validates ulen. */
2280 if (ulen
< sizeof(*uh
) || pskb_trim_rcsum(skb
, ulen
))
2285 if (udp4_csum_init(skb
, uh
, proto
))
2288 sk
= skb_steal_sock(skb
);
2290 struct dst_entry
*dst
= skb_dst(skb
);
2293 if (unlikely(sk
->sk_rx_dst
!= dst
))
2294 udp_sk_rx_dst_set(sk
, dst
);
2296 ret
= udp_unicast_rcv_skb(sk
, skb
, uh
);
2301 if (rt
->rt_flags
& (RTCF_BROADCAST
|RTCF_MULTICAST
))
2302 return __udp4_lib_mcast_deliver(net
, skb
, uh
,
2303 saddr
, daddr
, udptable
, proto
);
2305 sk
= __udp4_lib_lookup_skb(skb
, uh
->source
, uh
->dest
, udptable
);
2307 return udp_unicast_rcv_skb(sk
, skb
, uh
);
2309 if (!xfrm4_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
2313 /* No socket. Drop packet silently, if checksum is wrong */
2314 if (udp_lib_checksum_complete(skb
))
2317 __UDP_INC_STATS(net
, UDP_MIB_NOPORTS
, proto
== IPPROTO_UDPLITE
);
2318 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_PORT_UNREACH
, 0);
2321 * Hmm. We got an UDP packet to a port to which we
2322 * don't wanna listen. Ignore it.
2328 net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
2329 proto
== IPPROTO_UDPLITE
? "Lite" : "",
2330 &saddr
, ntohs(uh
->source
),
2332 &daddr
, ntohs(uh
->dest
));
2337 * RFC1122: OK. Discards the bad packet silently (as far as
2338 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
2340 net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
2341 proto
== IPPROTO_UDPLITE
? "Lite" : "",
2342 &saddr
, ntohs(uh
->source
), &daddr
, ntohs(uh
->dest
),
2344 __UDP_INC_STATS(net
, UDP_MIB_CSUMERRORS
, proto
== IPPROTO_UDPLITE
);
2346 __UDP_INC_STATS(net
, UDP_MIB_INERRORS
, proto
== IPPROTO_UDPLITE
);
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket found returns NULL
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);
		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
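
/* Usage note (an assumption, not defined in this file): the early-demux
 * lookups above only run when early demux is enabled; an illustrative way to
 * turn the UDP variant off from userspace, assuming the standard sysctl knob
 * is available, is:
 *
 *	sysctl -w net.ipv4.udp_early_demux=0
 */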
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);

	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udp_encap_needed_key);
	}
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			lock_sock(sk);
			udp_tunnel_encap_enable(sk->sk_socket);
			release_sock(sk);
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	case UDP_GRO:
		lock_sock(sk);
		if (valbool)
			udp_tunnel_encap_enable(sk->sk_socket);
		up->gro_enabled = valbool;
		release_sock(sk);
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets the actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by the send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage. */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values. */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
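
/* Illustrative userspace sketch of the UDP-Lite coverage options handled
 * above (an assumption-based example, not part of this file; it presumes the
 * usual <netinet/udplite.h> definitions of SOL_UDPLITE and the CSCOV names):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cscov = 20;	// checksum only the first 20 bytes (must be >= 8)
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cscov, sizeof(cscov));
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cscov, sizeof(cscov));
 */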
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd and a
 *	datagram with a checksum error is sitting in the queue, select()
 *	would report the socket as readable, but the subsequent read would
 *	block once the bad datagram is discarded.  Add special-case code to
 *	work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
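
/* Illustrative userspace sketch of the scenario described above (an
 * assumption-based example, not part of this file): a robust caller avoids
 * the blocking-read surprise by reading non-blockingly after poll() and
 * treating EAGAIN as "the queued datagram was discarded, poll again":
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t n = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
 *		if (n < 0 && errno == EAGAIN)
 *			;	// e.g. a bad-checksum datagram was dropped
 *	}
 */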
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
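
/* Usage note (an assumption, not stated in this file): udp_abort() is wired
 * up as .diag_destroy in udp_prot below, i.e. it is the hook used by the
 * sock_diag SOCK_DESTROY operation; an illustrative invocation from
 * userspace would be iproute2's "ss -K dport = :9999".
 */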
struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct sock *sk;

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;

	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);
void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "   sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
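
/* For reference, one /proc/net/udp line emitted by the code above looks
 * roughly like this (illustrative values only, some columns abbreviated):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue ...  uid  inode ref pointer drops
 *   80: 00000000:0044 00000000:0000 07 00000000:00000000 ...    0  12345   2 0000000000000000 0
 */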
const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};
static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}
static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
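
/* Usage note (an assumption, not stated in this file): "uhash_entries" is
 * consumed at boot via the __setup() hook above, so an illustrative way to
 * request a larger UDP hash table is a kernel command-line entry such as
 * "uhash_entries=65536"; values below UDP_HTABLE_SIZE_MIN are rounded up.
 */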
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}
static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}
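
/* Worked example of the sysctl_udp_mem sizing above (illustrative numbers,
 * assuming nr_free_buffer_pages() returned 800000): limit = 100000 pages, so
 * udp_mem becomes { 75000, 100000, 150000 }, i.e. a minimum of 75000 pages,
 * pressure at 100000 pages, and a hard limit at twice the minimum.
 */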