/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg :	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connected sockets.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi :	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi :	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
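
/* Illustrative userspace sketch (not part of this file): binding a UDP
 * socket with sin_port == 0 is what drives the randomized port search
 * above, while a non-zero port takes the conflict-check path instead.
 * Only standard socket API is used here, nothing kernel-internal:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_port   = 0,   // kernel picks a port
 *				 .sin_addr   = { htonl(INADDR_ANY) } };
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	// getsockname() now reports the port chosen by udp_lib_get_port()
 */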

static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif,
			 bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}
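
/* A worked example of the scoring above (the numbers follow directly from
 * the code): an unconnected IPv4 socket bound to 0.0.0.0:53 scores 2;
 * bound to the matching local address it scores 2 + 4 = 6; a connected
 * socket that additionally matches the remote address and port scores
 * 2 + 4 + 4 + 4 = 14, so the most specific socket always wins the lookup.
 */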

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     bool exact_dif, struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex, udptable,
			       NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}
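
/* Illustrative userspace sketch (not part of this file): the errors queued
 * by ip_icmp_error() above are only delivered if the application enabled
 * IP_RECVERR (inet->recverr); it can then read them from the error queue.
 * Only standard socket API is assumed here:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	char cbuf[512], buf[1500];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		// walk CMSG_FIRSTHDR()/CMSG_NXTHDR() looking for
 *		// cmsg_type == IP_RECVERR to get struct sock_extended_err
 *	}
 */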

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb:	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
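
/* A minimal sketch of the intended use, assuming a tunnel driver that has
 * just built an outer UDP header "uh" around an skb (the variable names
 * here are hypothetical; only udp_set_csum() itself is real):
 *
 *	uh->len = htons(skb->len - udp_offset);
 *	udp_set_csum(sock->sk->sk_no_check_tx, skb,
 *		     fl4.saddr, fl4.daddr, skb->len - udp_offset);
 *
 * i.e. the caller picks "nocheck" from the socket, and the function decides
 * between no checksum, GSO setup, LCO, or a software checksum.
 */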

static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
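
/* Illustrative userspace sketch (not part of this file): the corking path
 * above (up->pending / MSG_MORE) lets an application build one datagram
 * from several sends.  Standard socket API only:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);	// appended, not yet transmitted
 *	send(fd, payload, plen, 0);	// still the same datagram
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *	// uncorking pushes a single UDP datagram of hdrlen + plen bytes
 */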

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

#if BITS_PER_LONG == 64
static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch;

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch = (struct udp_dev_scratch *)&skb->dev_scratch;
	scratch->truesize = skb->truesize;
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
}
#else
static void udp_set_dev_scratch(struct sk_buff *skb)
{
	skb->dev_scratch = skb->truesize;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return skb->dev_scratch;
}
#endif

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&up->reader_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks are allocated on a per-cpu basis, instead of a
 * per-socket one (that would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
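
/* Note on sizing: the drop decision above compares sk_rmem_alloc (skb
 * truesize, not payload bytes) against sk->sk_rcvbuf, so a receiver that
 * expects bursts can simply raise the buffer from userspace (sketch):
 *
 *	int sz = 4 * 1024 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
 *
 * (the kernel doubles the value and clamps it against sysctl limits).
 */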

void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	/* we cleared the head states previously only if the skb lacks any IP
	 * options, see __udp_queue_rcv_skb().
	 */
	if (unlikely(IPCB(skb)->opt.optlen > 0))
		skb_release_head_state(skb);
	consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
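
/* Illustrative userspace sketch (not part of this file): SIOCINQ on a UDP
 * socket reports the payload length of the *next* datagram (via
 * first_packet_length() above), not the total number of bytes queued:
 *
 *	int n;
 *	if (ioctl(fd, SIOCINQ, &n) == 0)
 *		printf("next datagram carries %d bytes\n", n);
 */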

struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							peeked, off, err,
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							peeked, off, err,
							&last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	/* At recvmsg() time we may need skb->dst to process IP options-related
	 * cmsg; elsewhere we can clear all pending head states while they are
	 * hot in the cache.
	 */
	if (likely(IPCB(skb)->opt.optlen == 0))
		skb_release_head_state(skb);

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
				      is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
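
/* A minimal sketch of an encap_rcv hook honoring the contract documented
 * above (entirely hypothetical handler, loosely modeled on what the L2TP
 * and ESP-in-UDP code does; my_hdr, my_handle and IPPROTO_MY do not exist):
 *
 *	static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
 *			return 1;	// too short: let plain UDP have it
 *		if (my_handle(skb))
 *			return 0;	// consumed (or freed) by the handler
 *		return -IPPROTO_MY;	// resubmit as protocol IPPROTO_MY
 *	}
 *
 * installed by assigning up->encap_rcv and up->encap_type, as the tunnel
 * drivers do when userspace sets the UDP_ENCAP socket option.
 */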

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
	}
}

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * fast path accept negation.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket found returns NULL
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return;

		/* we are supposed to accept bcast packets */
		if (skb->pkt_type == PACKET_MULTICAST) {
			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
					       iph->protocol);
			if (!ours)
				return;
		}

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	/*
	 *	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
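
/* Illustrative userspace sketch (not part of this file): the UDP-Lite
 * coverage options handled above, used the way RFC 3828 intends:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int snd = 20;	// checksum only the first 20 bytes we send
 *	int rcv = 20;	// drop datagrams covering fewer than 20 bytes
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &snd, sizeof(snd));
 *	setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &rcv, sizeof(rcv));
 */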
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket struct
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() could
 *	indicate that data is available, yet the subsequent read would
 *	block. Add special case code to work around these arguably broken
 *	applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= POLLIN | POLLRDNORM;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
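
/*
 * Userspace consequence of the workaround above (an illustrative sketch):
 * on a blocking UDP socket, POLLIN is only reported once a datagram with a
 * valid checksum is queued, so a read that follows poll() will not hang on
 * a corrupt packet:
 *
 *	struct pollfd pfd = { .fd = udp_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		char buf[2048];
 *		ssize_t n = recv(udp_fd, buf, sizeof(buf), 0);
 *		// checksum-corrupt datagrams were already dropped above
 *	}
 */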
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.init		   = udp_init_sock,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.diag_destroy	   = udp_abort,
};
EXPORT_SYMBOL(udp_prot);
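
/*
 * For context (a sketch, assuming the usual registration in
 * net/ipv4/af_inet.c): udp_prot is bound to SOCK_DGRAM sockets through an
 * inet_protosw entry roughly like:
 *
 *	{
 *		.type     = SOCK_DGRAM,
 *		.protocol = IPPROTO_UDP,
 *		.prot     = &udp_prot,
 *		.ops      = &inet_dgram_ops,
 *		.flags    = INET_PROTOSW_PERMANENT,
 *	},
 *
 * so a socket(AF_INET, SOCK_DGRAM, 0) call ends up on the methods above.
 */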
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) ||
			sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;

	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->udp_table	= afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start	= udp_seq_start;
	afinfo->seq_ops.next	= udp_seq_next;
	afinfo->seq_ops.stop	= udp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
			     int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
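
/*
 * For reference, a /proc/net/udp line rendered by udp4_format_sock() looks
 * like this (hypothetical values):
 *
 *	 1737: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 21518 2 ffff8800b7a0b800 0
 *
 * i.e. bucket, local and remote address:port in hex, socket state, tx/rx
 * queue sizes, uid, inode, refcount, socket pointer and drop count.
 */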
static const struct file_operations udp_afinfo_seq_fops = {
	.owner    = THIS_MODULE,
	.open     = udp_seq_open,
	.read     = seq_read,
	.llseek   = seq_lseek,
	.release  = seq_release_net
};
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};
static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}
static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};
int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;

	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
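
/*
 * Usage example: the UDP hash table size can be forced from the kernel
 * command line, e.g. booting with
 *
 *	uhash_entries=65536
 *
 * Values that are non-zero but below UDP_HTABLE_SIZE_MIN are rounded up
 * by the handler above.
 */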
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}
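
/*
 * Layout note (worked example, assuming table->mask == 255): the single
 * alloc_large_system_hash() call above sizes each bucket at two hslots,
 * so one allocation holds both tables: hash[0..255] is the port-only
 * table and hash2 = hash + 256 is the (local address, port) table; the
 * two loops above initialize one half each.
 */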
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);
}
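
/*
 * Worked example (hypothetical numbers): if nr_free_buffer_pages()
 * returns 1,000,000, then limit = 125,000 pages and sysctl_udp_mem
 * becomes { 93750, 125000, 187500 }. With 8 possible CPUs,
 * udp_busylocks_log = ilog2(8) + 4 = 7, i.e. 1 << 7 = 128 busylocks.
 */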