/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen :	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2,
						 bool match_wildcard),
			       unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			if (!bitmap)
				return 1;
			__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
		}
	}
	return 0;
}
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2,
						  bool match_wildcard))
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			res = 1;
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
				  int (*saddr_same)(const struct sock *sk1,
						    const struct sock *sk2,
						    bool match_wildcard))
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    (*saddr_same)(sk, sk2, false))
			return reuseport_add_sock(sk, sk2);
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2,
				       bool match_wildcard),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
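
/* Illustration (added annotation, not part of the original file): the
 * bitmap scan above is what services an unbound send or an explicit
 * bind to port 0.  A sketch of the triggering user-space call:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_port   = 0 };	// 0 = pick a port
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// ends up here
 *
 * With snum == 0, udp_lib_get_port() walks one primary hash chain,
 * marks every in-use port of that chain in 'bitmap', then probes ports
 * in steps of an odd multiple of the table size so the random walk
 * still covers the whole local port range.
 */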
/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
			 bool match_wildcard)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	if (!ipv6_only_sock(sk2)) {
		if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
			return 1;
		if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
			return match_wildcard;
	}
	return 0;
}
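
/* Worked example (added annotation): with two sockets bound to port
 * 5000, one to 0.0.0.0 and one to 192.0.2.1, exactly one rcv_saddr is
 * zero, so the result is 'match_wildcard'.  bind() conflict checks
 * pass match_wildcard == true and therefore see a conflict (unless the
 * reuse flags checked by the callers permit it), while reuseport
 * grouping passes false and treats the two addresses as distinct.
 */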
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
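
/* Annotation (added commentary): the lookup above is two-tiered.  A
 * hypothetical trace for a datagram to 192.0.2.1:53 on a busy slot:
 *
 *	hslot  = hash(port 53)                 - primary, >10 entries
 *	hslot2 = hash(192.0.2.1, 53) & mask    - secondary, tried first
 *	fallback: hash(INADDR_ANY, 53) & mask  - wildcard-bound sockets
 *
 * Only when a secondary chain would be longer than the primary one
 * does the code fall back to scanning the primary chain at 'begin:'.
 */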
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex, udptable,
			       NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *	RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);
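
/* Annotation (added commentary): in the single-fragment case the device
 * finishes the checksum, so the field is seeded with the *complement*
 * of the pseudo-header sum:
 *
 *	uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0);
 *
 * The NIC then folds the payload sum starting at skb->csum_start and
 * stores the result at csum_start + csum_offset.  With a frag list the
 * sum must be completed in software, hence CHECKSUM_NONE above.
 */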
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
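
/* Illustration (added annotation, not part of the original source): the
 * corked path above is driven from user space roughly like this,
 * assuming a connected UDP fd:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, n1, 0);		// appended, up->pending set
 *	send(fd, part2, n2, 0);		// appended to the same datagram
 *	on = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *					// -> udp_push_pending_frames()
 *
 * Without corking (and without MSG_MORE) the lockless ip_make_skb() /
 * udp_send_skb() fast path is taken instead.
 */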
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial)
{
	int amt;

	atomic_sub(size, &sk->sk_rmem_alloc);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
}

static void udp_rmem_free(struct sk_buff *skb)
{
	udp_rmem_release(skb->sk, skb->truesize, 1);
}

int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	int size = skb->truesize;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem && (rmem + size > sk->sk_rcvbuf))
		goto drop;

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if ((rmem > sk->sk_rcvbuf) && (rmem > size))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* the skb owner is now the udp socket */
	skb->sk = sk;
	skb->destructor = udp_rmem_free;
	skb->dev = NULL;
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
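
/* Annotation (added commentary): forward-alloc bookkeeping example.
 * Suppose sk_forward_alloc holds 300 bytes and an skb with truesize
 * 700 arrives: sk_mem_pages(700) rounds the charge up to whole
 * SK_MEM_QUANTUM units, __sk_mem_raise_allocated() charges the UDP
 * memory accounting, and the unused remainder stays cached in
 * sk_forward_alloc for later packets.  udp_rmem_release() later hands
 * back everything above a partial quantum.
 */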
static void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	__skb_queue_purge(&sk->sk_receive_queue);
	udp_rmem_release(sk, 0, 0);
	inet_sock_destruct(sk);
}

int udp_init_sock(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
	consume_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int res;

	__skb_queue_head_init(&list_kill);

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
		udp_lib_checksum_complete(skb)) {
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
				IS_UDPLITE(sk));
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
				IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		__skb_queue_tail(&list_kill, skb);
	}
	res = skb ? skb->len : -1;
	spin_unlock_bh(&rcvq->lock);

	if (!skb_queue_empty(&list_kill)) {
		bool slow = lock_sock_fast(sk);

		__skb_queue_purge(&list_kill);
		sk_mem_reclaim_partial(sk);
		unlock_sock_fast(sk, slow);
	}
	return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);
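
/* Illustration (added annotation, not from the original file): SIOCINQ
 * reports the size of the next datagram, not the total queued bytes:
 *
 *	int n;
 *	if (ioctl(fd, SIOCINQ, &n) == 0 && n > 0)
 *		recv(fd, buf, n, 0);	// reads exactly one datagram
 *
 * Because first_packet_length() drops bad-checksum frames on the way,
 * the value refers to the first *valid* packet in the receive queue.
 */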
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;
	bool slow;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = skb->len;
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov || peeking) {
		checksum_valid = !udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, off, msg, copied);
	else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		trace_kfree_skb(skb, udp_recvmsg);
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		skb_free_datagram_locked(sk, skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	__skb_free_datagram_locked(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	unlock_sock_fast(sk, slow);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
int udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
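
/* Illustration (added annotation, not part of the original source):
 * this is the handler reached when user space dissolves an association:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	// 1003.1g "break association"
 *
 * Afterwards sendto()/recvfrom() work with arbitrary peers again.  An
 * explicitly bound local port (SOCK_BINDPORT_LOCK) is kept, while an
 * autobound port is released by the unhash above.
 */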
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	rc = __sock_queue_rcv_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
				      is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);
/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);
	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
				is_udplite);
		goto drop;
	}

	rc = 0;

	ipv4_pktinfo_prepare(sk, skb);
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		goto drop;
	}
	bh_unlock_sock(sk);

	return rc;

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	dst_hold(dst);
	old = xchg(&sk->sk_rx_dst, dst);
	dst_release(old);
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udp_table.mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
		hslot = &udp_table.hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * zero-check variant below is used.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got an UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket found returns NULL
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}
/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
void udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_BROADCAST ||
	    skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return;

		/* we are supposed to accept bcast packets */
		if (skb->pkt_type == PACKET_MULTICAST) {
			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
					       iph->protocol);
			if (!ours)
				return;
		}

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr, dif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif);
	}

	if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		/* DST_NOCACHE can not be used without taking a reference */
		if (dst->flags & DST_NOCACHE) {
			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
				skb_dst_set(skb, dst);
		} else {
			skb_dst_set_noref(skb, dst);
		}
	}
}
int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
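
/* Illustration (added annotation, not from the original file): UDP-Lite
 * partial coverage from user space, assuming a UDP-Lite socket:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;	// checksum only the first 20 bytes
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * Values 1..7 are rounded up to the RFC 3828 minimum of 8 by the code
 * above; on plain UDP sockets both options fail with ENOPROTOOPT.
 */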
int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 * 	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is same as datagram poll, except for the special case of
 *	blocking sockets. If application is using a blocking fd
 *	and a packet with checksum error is in the queue;
 *	then it could get return from select indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* Check for false positives due to checksum errors */
	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(POLLIN | POLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
struct proto udp_prot = {
	.name		   = "UDP",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip4_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udp_destroy_sock,
	.setsockopt	   = udp_setsockopt,
	.getsockopt	   = udp_getsockopt,
	.sendmsg	   = udp_sendmsg,
	.recvmsg	   = udp_recvmsg,
	.sendpage	   = udp_sendpage,
	.backlog_rcv	   = __udp_queue_rcv_skb,
	.release_cb	   = ip4_datagram_release_cb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v4_rehash,
	.get_port	   = udp_v4_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp_sock),
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udp_setsockopt,
	.compat_getsockopt = compat_udp_getsockopt,
#endif
	.diag_destroy	   = udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->udp_table		= afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);
2448 int udp_proc_register(struct net
*net
, struct udp_seq_afinfo
*afinfo
)
2450 struct proc_dir_entry
*p
;
2453 afinfo
->seq_ops
.start
= udp_seq_start
;
2454 afinfo
->seq_ops
.next
= udp_seq_next
;
2455 afinfo
->seq_ops
.stop
= udp_seq_stop
;
2457 p
= proc_create_data(afinfo
->name
, S_IRUGO
, net
->proc_net
,
2458 afinfo
->seq_fops
, afinfo
);
2463 EXPORT_SYMBOL(udp_proc_register
);
2465 void udp_proc_unregister(struct net
*net
, struct udp_seq_afinfo
*afinfo
)
2467 remove_proc_entry(afinfo
->name
, net
->proc_net
);
2469 EXPORT_SYMBOL(udp_proc_unregister
);
2471 /* ------------------------------------------------------------------------ */
2472 static void udp4_format_sock(struct sock
*sp
, struct seq_file
*f
,
2475 struct inet_sock
*inet
= inet_sk(sp
);
2476 __be32 dest
= inet
->inet_daddr
;
2477 __be32 src
= inet
->inet_rcv_saddr
;
2478 __u16 destp
= ntohs(inet
->inet_dport
);
2479 __u16 srcp
= ntohs(inet
->inet_sport
);
2481 seq_printf(f
, "%5d: %08X:%04X %08X:%04X"
2482 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
2483 bucket
, src
, srcp
, dest
, destp
, sp
->sk_state
,
2484 sk_wmem_alloc_get(sp
),
2485 sk_rmem_alloc_get(sp
),
2487 from_kuid_munged(seq_user_ns(f
), sock_i_uid(sp
)),
2489 atomic_read(&sp
->sk_refcnt
), sp
,
2490 atomic_read(&sp
->sk_drops
));
2493 int udp4_seq_show(struct seq_file
*seq
, void *v
)
2495 seq_setwidth(seq
, 127);
2496 if (v
== SEQ_START_TOKEN
)
2497 seq_puts(seq
, " sl local_address rem_address st tx_queue "
2498 "rx_queue tr tm->when retrnsmt uid timeout "
2499 "inode ref pointer drops");
2501 struct udp_iter_state
*state
= seq
->private;
2503 udp4_format_sock(v
, seq
, state
->bucket
);
2509 static const struct file_operations udp_afinfo_seq_fops
= {
2510 .owner
= THIS_MODULE
,
2511 .open
= udp_seq_open
,
2513 .llseek
= seq_lseek
,
2514 .release
= seq_release_net
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}
u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
	unsigned long limit;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}