1 | /* | |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * The User Datagram Protocol (UDP). | |
7 | * | |
8 | * Authors: Ross Biro | |
9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | |
10 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | |
11 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> | |
12 | * Hirokazu Takahashi, <taka@valinux.co.jp> | |
13 | * | |
14 | * Fixes: | |
15 | * Alan Cox : verify_area() calls | |
16 | * Alan Cox : stopped close while in use off icmp | |
17 | * messages. Not a fix but a botch that | |
18 | * for udp at least is 'valid'. | |
19 | * Alan Cox : Fixed icmp handling properly | |
20 | * Alan Cox : Correct error for oversized datagrams | |
21 | * Alan Cox : Tidied select() semantics. | |
22 | * Alan Cox : udp_err() fixed properly, also now | |
23 | * select and read wake correctly on errors | |
24 | * Alan Cox : udp_send verify_area moved to avoid mem leak | |
25 | * Alan Cox : UDP can count its memory | |
26 | * Alan Cox : send to an unknown connection causes | |
27 | * an ECONNREFUSED off the icmp, but | |
28 | * does NOT close. | |
29 | * Alan Cox : Switched to new sk_buff handlers. No more backlog! | |
30 | * Alan Cox : Using generic datagram code. Even smaller and the PEEK | |
31 | * bug no longer crashes it. | |
32 | * Fred Van Kempen : Net2e support for sk->broadcast. | |
33 | * Alan Cox : Uses skb_free_datagram | |
34 | * Alan Cox : Added get/set sockopt support. | |
35 | * Alan Cox : Broadcasting without option set returns EACCES. | |
36 | * Alan Cox : No wakeup calls. Instead we now use the callbacks. | |
37 | * Alan Cox : Use ip_tos and ip_ttl | |
38 | * Alan Cox : SNMP Mibs | |
39 | * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. | |
40 | * Matt Dillon : UDP length checks. | |
41 | * Alan Cox : Smarter af_inet used properly. | |
42 | * Alan Cox : Use new kernel side addressing. | |
43 | * Alan Cox : Incorrect return on truncated datagram receive. | |
44 | * Arnt Gulbrandsen : New udp_send and stuff | |
45 | * Alan Cox : Cache last socket | |
46 | * Alan Cox : Route cache | |
47 | * Jon Peatfield : Minor efficiency fix to sendto(). | |
48 | * Mike Shaver : RFC1122 checks. | |
49 | * Alan Cox : Nonblocking error fix. | |
50 | * Willy Konynenberg : Transparent proxying support. | |
51 | * Mike McLagan : Routing by source | |
52 | * David S. Miller : New socket lookup architecture. | |
53 | * Last socket cache retained as it | |
54 | * does have a high hit rate. | |
55 | * Olaf Kirch : Don't linearise iovec on sendmsg. | |
56 | * Andi Kleen : Some cleanups, cache destination entry | |
57 | * for connect. | |
58 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. | |
59 | * Melvin Smith : Check msg_name not msg_namelen in sendto(), | |
60 | * return ENOTCONN for unconnected sockets (POSIX) | |
61 | * Janos Farkas : don't deliver multi/broadcasts to a different | |
62 | * bound-to-device socket | |
63 | * Hirokazu Takahashi : HW checksumming for outgoing UDP | |
64 | * datagrams. | |
65 | * Hirokazu Takahashi : sendfile() on UDP works now. | |
66 | * Arnaldo C. Melo : convert /proc/net/udp to seq_file | |
67 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which | |
68 | * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind | |
69 | * a single port at the same time. | |
70 | * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support | |
71 | * James Chapman : Add L2TP encapsulation type. | |
72 | * | |
73 | * | |
74 | * This program is free software; you can redistribute it and/or | |
75 | * modify it under the terms of the GNU General Public License | |
76 | * as published by the Free Software Foundation; either version | |
77 | * 2 of the License, or (at your option) any later version. | |
78 | */ | |
79 | ||
80 | #include <asm/system.h> | |
81 | #include <asm/uaccess.h> | |
82 | #include <asm/ioctls.h> | |
83 | #include <linux/bootmem.h> | |
84 | #include <linux/highmem.h> | |
85 | #include <linux/swap.h> | |
86 | #include <linux/types.h> | |
87 | #include <linux/fcntl.h> | |
88 | #include <linux/module.h> | |
89 | #include <linux/socket.h> | |
90 | #include <linux/sockios.h> | |
91 | #include <linux/igmp.h> | |
92 | #include <linux/in.h> | |
93 | #include <linux/errno.h> | |
94 | #include <linux/timer.h> | |
95 | #include <linux/mm.h> | |
96 | #include <linux/inet.h> | |
97 | #include <linux/netdevice.h> | |
98 | #include <linux/slab.h> | |
99 | #include <net/tcp_states.h> | |
100 | #include <linux/skbuff.h> | |
101 | #include <linux/proc_fs.h> | |
102 | #include <linux/seq_file.h> | |
103 | #include <net/net_namespace.h> | |
104 | #include <net/icmp.h> | |
105 | #include <net/route.h> | |
106 | #include <net/checksum.h> | |
107 | #include <net/xfrm.h> | |
108 | #include "udp_impl.h" | |
109 | ||
110 | struct udp_table udp_table __read_mostly; | |
111 | EXPORT_SYMBOL(udp_table); | |
112 | ||
113 | int sysctl_udp_mem[3] __read_mostly; | |
114 | EXPORT_SYMBOL(sysctl_udp_mem); | |
115 | ||
116 | int sysctl_udp_rmem_min __read_mostly; | |
117 | EXPORT_SYMBOL(sysctl_udp_rmem_min); | |
118 | ||
119 | int sysctl_udp_wmem_min __read_mostly; | |
120 | EXPORT_SYMBOL(sysctl_udp_wmem_min); | |
121 | ||
122 | atomic_t udp_memory_allocated; | |
123 | EXPORT_SYMBOL(udp_memory_allocated); | |
124 | ||
125 | #define MAX_UDP_PORTS 65536 | |
126 | #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) | |
127 | ||
128 | static int udp_lib_lport_inuse(struct net *net, __u16 num, | |
129 | const struct udp_hslot *hslot, | |
130 | unsigned long *bitmap, | |
131 | struct sock *sk, | |
132 | int (*saddr_comp)(const struct sock *sk1, | |
133 | const struct sock *sk2), | |
134 | unsigned int log) | |
135 | { | |
136 | struct sock *sk2; | |
137 | struct hlist_nulls_node *node; | |
138 | ||
139 | sk_nulls_for_each(sk2, node, &hslot->head) | |
140 | if (net_eq(sock_net(sk2), net) && | |
141 | sk2 != sk && | |
142 | (bitmap || udp_sk(sk2)->udp_port_hash == num) && | |
143 | (!sk2->sk_reuse || !sk->sk_reuse) && | |
144 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || | |
145 | sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | |
146 | (*saddr_comp)(sk, sk2)) { | |
147 | if (bitmap) | |
148 | __set_bit(udp_sk(sk2)->udp_port_hash >> log, | |
149 | bitmap); | |
150 | else | |
151 | return 1; | |
152 | } | |
153 | return 0; | |
154 | } | |
155 | ||
156 | /* | |
157 | * Note: we still hold the spinlock of the primary hash chain, so no other writer | |
158 | * can insert/delete a socket with local_port == num | |
159 | */ | |
160 | static int udp_lib_lport_inuse2(struct net *net, __u16 num, | |
161 | struct udp_hslot *hslot2, | |
162 | struct sock *sk, | |
163 | int (*saddr_comp)(const struct sock *sk1, | |
164 | const struct sock *sk2)) | |
165 | { | |
166 | struct sock *sk2; | |
167 | struct hlist_nulls_node *node; | |
168 | int res = 0; | |
169 | ||
170 | spin_lock(&hslot2->lock); | |
171 | udp_portaddr_for_each_entry(sk2, node, &hslot2->head) | |
172 | if (net_eq(sock_net(sk2), net) && | |
173 | sk2 != sk && | |
174 | (udp_sk(sk2)->udp_port_hash == num) && | |
175 | (!sk2->sk_reuse || !sk->sk_reuse) && | |
176 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || | |
177 | sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | |
178 | (*saddr_comp)(sk, sk2)) { | |
179 | res = 1; | |
180 | break; | |
181 | } | |
182 | spin_unlock(&hslot2->lock); | |
183 | return res; | |
184 | } | |
185 | ||
186 | /** | |
187 | * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 | |
188 | * | |
189 | * @sk: socket struct in question | |
190 | * @snum: port number to look up | |
191 | * @saddr_comp: AF-dependent comparison of bound local IP addresses | |
192 | * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, | |
193 | * with NULL address | |
194 | */ | |
195 | int udp_lib_get_port(struct sock *sk, unsigned short snum, | |
196 | int (*saddr_comp)(const struct sock *sk1, | |
197 | const struct sock *sk2), | |
198 | unsigned int hash2_nulladdr) | |
199 | { | |
200 | struct udp_hslot *hslot, *hslot2; | |
201 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | |
202 | int error = 1; | |
203 | struct net *net = sock_net(sk); | |
204 | ||
205 | if (!snum) { | |
206 | int low, high, remaining; | |
207 | unsigned rand; | |
208 | unsigned short first, last; | |
209 | DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); | |
210 | ||
211 | inet_get_local_port_range(&low, &high); | |
212 | remaining = (high - low) + 1; | |
213 | ||
214 | rand = net_random(); | |
215 | first = (((u64)rand * remaining) >> 32) + low; | |
216 | /* | |
217 | * force rand to be an odd multiple of UDP_HTABLE_SIZE | |
218 | */ | |
219 | rand = (rand | 1) * (udptable->mask + 1); | |
220 | last = first + udptable->mask + 1; | |
221 | do { | |
222 | hslot = udp_hashslot(udptable, net, first); | |
223 | bitmap_zero(bitmap, PORTS_PER_CHAIN); | |
224 | spin_lock_bh(&hslot->lock); | |
225 | udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, | |
226 | saddr_comp, udptable->log); | |
227 | ||
228 | snum = first; | |
229 | /* | |
230 | * Iterate on all possible values of snum for this hash. | |
231 | * Using steps of an odd multiple of UDP_HTABLE_SIZE | |
232 | * gives us randomization and full range coverage. | |
233 | */ | |
234 | do { | |
235 | if (low <= snum && snum <= high && | |
236 | !test_bit(snum >> udptable->log, bitmap) && | |
237 | !inet_is_reserved_local_port(snum)) | |
238 | goto found; | |
239 | snum += rand; | |
240 | } while (snum != first); | |
241 | spin_unlock_bh(&hslot->lock); | |
242 | } while (++first != last); | |
243 | goto fail; | |
244 | } else { | |
245 | hslot = udp_hashslot(udptable, net, snum); | |
246 | spin_lock_bh(&hslot->lock); | |
247 | if (hslot->count > 10) { | |
248 | int exist; | |
249 | unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; | |
250 | ||
251 | slot2 &= udptable->mask; | |
252 | hash2_nulladdr &= udptable->mask; | |
253 | ||
254 | hslot2 = udp_hashslot2(udptable, slot2); | |
255 | if (hslot->count < hslot2->count) | |
256 | goto scan_primary_hash; | |
257 | ||
258 | exist = udp_lib_lport_inuse2(net, snum, hslot2, | |
259 | sk, saddr_comp); | |
260 | if (!exist && (hash2_nulladdr != slot2)) { | |
261 | hslot2 = udp_hashslot2(udptable, hash2_nulladdr); | |
262 | exist = udp_lib_lport_inuse2(net, snum, hslot2, | |
263 | sk, saddr_comp); | |
264 | } | |
265 | if (exist) | |
266 | goto fail_unlock; | |
267 | else | |
268 | goto found; | |
269 | } | |
270 | scan_primary_hash: | |
271 | if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, | |
272 | saddr_comp, 0)) | |
273 | goto fail_unlock; | |
274 | } | |
275 | found: | |
276 | inet_sk(sk)->inet_num = snum; | |
277 | udp_sk(sk)->udp_port_hash = snum; | |
278 | udp_sk(sk)->udp_portaddr_hash ^= snum; | |
279 | if (sk_unhashed(sk)) { | |
280 | sk_nulls_add_node_rcu(sk, &hslot->head); | |
281 | hslot->count++; | |
282 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); | |
283 | ||
284 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | |
285 | spin_lock(&hslot2->lock); | |
286 | hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, | |
287 | &hslot2->head); | |
288 | hslot2->count++; | |
289 | spin_unlock(&hslot2->lock); | |
290 | } | |
291 | error = 0; | |
292 | fail_unlock: | |
293 | spin_unlock_bh(&hslot->lock); | |
294 | fail: | |
295 | return error; | |
296 | } | |
297 | EXPORT_SYMBOL(udp_lib_get_port); | |
298 | ||
299 | static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | |
300 | { | |
301 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | |
302 | ||
303 | return (!ipv6_only_sock(sk2) && | |
304 | (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || | |
305 | inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); | |
306 | } | |
307 | ||
308 | static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, | |
309 | unsigned int port) | |
310 | { | |
311 | return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; | |
312 | } | |
313 | ||
314 | int udp_v4_get_port(struct sock *sk, unsigned short snum) | |
315 | { | |
316 | unsigned int hash2_nulladdr = | |
317 | udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); | |
318 | unsigned int hash2_partial = | |
319 | udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); | |
320 | ||
321 | /* precompute partial secondary hash */ | |
322 | udp_sk(sk)->udp_portaddr_hash = hash2_partial; | |
323 | return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); | |
324 | } | |
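/*
 * Illustrative user-space sketch (not part of this file, minimal error
 * handling): binding with port 0 exercises the ephemeral-port selection
 * done by udp_v4_get_port()/udp_lib_get_port() above; getsockname()
 * then reports the port the kernel picked.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int bind_ephemeral_udp(void)
{
	struct sockaddr_in addr;
	socklen_t alen = sizeof(addr);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = 0;				/* let the kernel choose */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    getsockname(fd, (struct sockaddr *)&addr, &alen) < 0) {
		close(fd);
		return -1;
	}
	printf("bound to ephemeral port %u\n", ntohs(addr.sin_port));
	return fd;
}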
325 | ||
326 | static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, | |
327 | unsigned short hnum, | |
328 | __be16 sport, __be32 daddr, __be16 dport, int dif) | |
329 | { | |
330 | int score = -1; | |
331 | ||
332 | if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && | |
333 | !ipv6_only_sock(sk)) { | |
334 | struct inet_sock *inet = inet_sk(sk); | |
335 | ||
336 | score = (sk->sk_family == PF_INET ? 1 : 0); | |
337 | if (inet->inet_rcv_saddr) { | |
338 | if (inet->inet_rcv_saddr != daddr) | |
339 | return -1; | |
340 | score += 2; | |
341 | } | |
342 | if (inet->inet_daddr) { | |
343 | if (inet->inet_daddr != saddr) | |
344 | return -1; | |
345 | score += 2; | |
346 | } | |
347 | if (inet->inet_dport) { | |
348 | if (inet->inet_dport != sport) | |
349 | return -1; | |
350 | score += 2; | |
351 | } | |
352 | if (sk->sk_bound_dev_if) { | |
353 | if (sk->sk_bound_dev_if != dif) | |
354 | return -1; | |
355 | score += 2; | |
356 | } | |
357 | } | |
358 | return score; | |
359 | } | |
360 | ||
361 | /* | |
362 | * In this second variant, we check that (daddr, dport) matches (inet_rcv_saddr, inet_num) | |
363 | */ | |
364 | #define SCORE2_MAX (1 + 2 + 2 + 2) | |
365 | static inline int compute_score2(struct sock *sk, struct net *net, | |
366 | __be32 saddr, __be16 sport, | |
367 | __be32 daddr, unsigned int hnum, int dif) | |
368 | { | |
369 | int score = -1; | |
370 | ||
371 | if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { | |
372 | struct inet_sock *inet = inet_sk(sk); | |
373 | ||
374 | if (inet->inet_rcv_saddr != daddr) | |
375 | return -1; | |
376 | if (inet->inet_num != hnum) | |
377 | return -1; | |
378 | ||
379 | score = (sk->sk_family == PF_INET ? 1 : 0); | |
380 | if (inet->inet_daddr) { | |
381 | if (inet->inet_daddr != saddr) | |
382 | return -1; | |
383 | score += 2; | |
384 | } | |
385 | if (inet->inet_dport) { | |
386 | if (inet->inet_dport != sport) | |
387 | return -1; | |
388 | score += 2; | |
389 | } | |
390 | if (sk->sk_bound_dev_if) { | |
391 | if (sk->sk_bound_dev_if != dif) | |
392 | return -1; | |
393 | score += 2; | |
394 | } | |
395 | } | |
396 | return score; | |
397 | } | |
398 | ||
399 | ||
400 | /* called with rcu_read_lock() */ | |
401 | static struct sock *udp4_lib_lookup2(struct net *net, | |
402 | __be32 saddr, __be16 sport, | |
403 | __be32 daddr, unsigned int hnum, int dif, | |
404 | struct udp_hslot *hslot2, unsigned int slot2) | |
405 | { | |
406 | struct sock *sk, *result; | |
407 | struct hlist_nulls_node *node; | |
408 | int score, badness; | |
409 | ||
410 | begin: | |
411 | result = NULL; | |
412 | badness = -1; | |
413 | udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { | |
414 | score = compute_score2(sk, net, saddr, sport, | |
415 | daddr, hnum, dif); | |
416 | if (score > badness) { | |
417 | result = sk; | |
418 | badness = score; | |
419 | if (score == SCORE2_MAX) | |
420 | goto exact_match; | |
421 | } | |
422 | } | |
423 | /* | |
424 | * if the nulls value we got at the end of this lookup is | |
425 | * not the expected one, we must restart lookup. | |
426 | * We probably met an item that was moved to another chain. | |
427 | */ | |
428 | if (get_nulls_value(node) != slot2) | |
429 | goto begin; | |
430 | ||
431 | if (result) { | |
432 | exact_match: | |
433 | if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) | |
434 | result = NULL; | |
435 | else if (unlikely(compute_score2(result, net, saddr, sport, | |
436 | daddr, hnum, dif) < badness)) { | |
437 | sock_put(result); | |
438 | goto begin; | |
439 | } | |
440 | } | |
441 | return result; | |
442 | } | |
443 | ||
444 | /* UDP is nearly always wildcarded out the wazoo; it makes no sense to try | |
445 | * harder than this. -DaveM | |
446 | */ | |
447 | static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, | |
448 | __be16 sport, __be32 daddr, __be16 dport, | |
449 | int dif, struct udp_table *udptable) | |
450 | { | |
451 | struct sock *sk, *result; | |
452 | struct hlist_nulls_node *node; | |
453 | unsigned short hnum = ntohs(dport); | |
454 | unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); | |
455 | struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; | |
456 | int score, badness; | |
457 | ||
458 | rcu_read_lock(); | |
459 | if (hslot->count > 10) { | |
460 | hash2 = udp4_portaddr_hash(net, daddr, hnum); | |
461 | slot2 = hash2 & udptable->mask; | |
462 | hslot2 = &udptable->hash2[slot2]; | |
463 | if (hslot->count < hslot2->count) | |
464 | goto begin; | |
465 | ||
466 | result = udp4_lib_lookup2(net, saddr, sport, | |
467 | daddr, hnum, dif, | |
468 | hslot2, slot2); | |
469 | if (!result) { | |
470 | hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); | |
471 | slot2 = hash2 & udptable->mask; | |
472 | hslot2 = &udptable->hash2[slot2]; | |
473 | if (hslot->count < hslot2->count) | |
474 | goto begin; | |
475 | ||
476 | result = udp4_lib_lookup2(net, saddr, sport, | |
477 | htonl(INADDR_ANY), hnum, dif, | |
478 | hslot2, slot2); | |
479 | } | |
480 | rcu_read_unlock(); | |
481 | return result; | |
482 | } | |
483 | begin: | |
484 | result = NULL; | |
485 | badness = -1; | |
486 | sk_nulls_for_each_rcu(sk, node, &hslot->head) { | |
487 | score = compute_score(sk, net, saddr, hnum, sport, | |
488 | daddr, dport, dif); | |
489 | if (score > badness) { | |
490 | result = sk; | |
491 | badness = score; | |
492 | } | |
493 | } | |
494 | /* | |
495 | * if the nulls value we got at the end of this lookup is | |
496 | * not the expected one, we must restart lookup. | |
497 | * We probably met an item that was moved to another chain. | |
498 | */ | |
499 | if (get_nulls_value(node) != slot) | |
500 | goto begin; | |
501 | ||
502 | if (result) { | |
503 | if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt))) | |
504 | result = NULL; | |
505 | else if (unlikely(compute_score(result, net, saddr, hnum, sport, | |
506 | daddr, dport, dif) < badness)) { | |
507 | sock_put(result); | |
508 | goto begin; | |
509 | } | |
510 | } | |
511 | rcu_read_unlock(); | |
512 | return result; | |
513 | } | |
514 | ||
515 | static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | |
516 | __be16 sport, __be16 dport, | |
517 | struct udp_table *udptable) | |
518 | { | |
519 | struct sock *sk; | |
520 | const struct iphdr *iph = ip_hdr(skb); | |
521 | ||
522 | if (unlikely(sk = skb_steal_sock(skb))) | |
523 | return sk; | |
524 | else | |
525 | return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, | |
526 | iph->daddr, dport, inet_iif(skb), | |
527 | udptable); | |
528 | } | |
529 | ||
530 | struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, | |
531 | __be32 daddr, __be16 dport, int dif) | |
532 | { | |
533 | return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); | |
534 | } | |
535 | EXPORT_SYMBOL_GPL(udp4_lib_lookup); | |
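/*
 * Hedged kernel-side sketch (hypothetical caller, not part of this file):
 * udp4_lib_lookup() is exported above and returns a socket with its
 * reference count held, so callers must drop it with sock_put().
 */
static bool example_udp_port_has_listener(struct net *net, __be32 daddr,
					  __be16 dport)
{
	/* wildcard source address/port, no bound-device restriction */
	struct sock *sk = udp4_lib_lookup(net, 0, 0, daddr, dport, 0);

	if (!sk)
		return false;
	sock_put(sk);
	return true;
}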
536 | ||
537 | static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, | |
538 | __be16 loc_port, __be32 loc_addr, | |
539 | __be16 rmt_port, __be32 rmt_addr, | |
540 | int dif) | |
541 | { | |
542 | struct hlist_nulls_node *node; | |
543 | struct sock *s = sk; | |
544 | unsigned short hnum = ntohs(loc_port); | |
545 | ||
546 | sk_nulls_for_each_from(s, node) { | |
547 | struct inet_sock *inet = inet_sk(s); | |
548 | ||
549 | if (!net_eq(sock_net(s), net) || | |
550 | udp_sk(s)->udp_port_hash != hnum || | |
551 | (inet->inet_daddr && inet->inet_daddr != rmt_addr) || | |
552 | (inet->inet_dport != rmt_port && inet->inet_dport) || | |
553 | (inet->inet_rcv_saddr && | |
554 | inet->inet_rcv_saddr != loc_addr) || | |
555 | ipv6_only_sock(s) || | |
556 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) | |
557 | continue; | |
558 | if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) | |
559 | continue; | |
560 | goto found; | |
561 | } | |
562 | s = NULL; | |
563 | found: | |
564 | return s; | |
565 | } | |
566 | ||
567 | /* | |
568 | * This routine is called by the ICMP module when it gets some | |
569 | * sort of error condition. If err < 0 then the socket should | |
570 | * be closed and the error returned to the user. If err > 0 | |
571 | * it's just the icmp type << 8 | icmp code. | |
572 | * Header points to the ip header of the error packet. We move | |
573 | * on past this. Then (as it used to claim before adjustment) | |
574 | * header points to the first 8 bytes of the udp header. We need | |
575 | * to find the appropriate port. | |
576 | */ | |
577 | ||
578 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |
579 | { | |
580 | struct inet_sock *inet; | |
581 | struct iphdr *iph = (struct iphdr *)skb->data; | |
582 | struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); | |
583 | const int type = icmp_hdr(skb)->type; | |
584 | const int code = icmp_hdr(skb)->code; | |
585 | struct sock *sk; | |
586 | int harderr; | |
587 | int err; | |
588 | struct net *net = dev_net(skb->dev); | |
589 | ||
590 | sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, | |
591 | iph->saddr, uh->source, skb->dev->ifindex, udptable); | |
592 | if (sk == NULL) { | |
593 | ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); | |
594 | return; /* No socket for error */ | |
595 | } | |
596 | ||
597 | err = 0; | |
598 | harderr = 0; | |
599 | inet = inet_sk(sk); | |
600 | ||
601 | switch (type) { | |
602 | default: | |
603 | case ICMP_TIME_EXCEEDED: | |
604 | err = EHOSTUNREACH; | |
605 | break; | |
606 | case ICMP_SOURCE_QUENCH: | |
607 | goto out; | |
608 | case ICMP_PARAMETERPROB: | |
609 | err = EPROTO; | |
610 | harderr = 1; | |
611 | break; | |
612 | case ICMP_DEST_UNREACH: | |
613 | if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ | |
614 | if (inet->pmtudisc != IP_PMTUDISC_DONT) { | |
615 | err = EMSGSIZE; | |
616 | harderr = 1; | |
617 | break; | |
618 | } | |
619 | goto out; | |
620 | } | |
621 | err = EHOSTUNREACH; | |
622 | if (code <= NR_ICMP_UNREACH) { | |
623 | harderr = icmp_err_convert[code].fatal; | |
624 | err = icmp_err_convert[code].errno; | |
625 | } | |
626 | break; | |
627 | } | |
628 | ||
629 | /* | |
630 | * RFC1122: OK. Passes ICMP errors back to application, as per | |
631 | * 4.1.3.3. | |
632 | */ | |
633 | if (!inet->recverr) { | |
634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | |
635 | goto out; | |
636 | } else { | |
637 | bh_lock_sock(sk); | |
638 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); | |
639 | bh_unlock_sock(sk); | |
640 | } | |
641 | sk->sk_err = err; | |
642 | sk->sk_error_report(sk); | |
643 | out: | |
644 | sock_put(sk); | |
645 | } | |
646 | ||
647 | void udp_err(struct sk_buff *skb, u32 info) | |
648 | { | |
649 | __udp4_lib_err(skb, info, &udp_table); | |
650 | } | |
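/*
 * Illustrative user-space sketch (not part of this file): with IP_RECVERR
 * enabled, the ip_icmp_error() call above queues the ICMP error on the
 * socket's error queue, where the application can read it back:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_IP, IP_RECVERR, &on, sizeof(on));
 */
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void drain_udp_error_queue(int fd)
{
	char cbuf[512], dbuf[1];
	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			fprintf(stderr, "ICMP error: errno %u type %u code %u\n",
				ee->ee_errno, ee->ee_type, ee->ee_code);
		}
	}
}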
651 | ||
652 | /* | |
653 | * Throw away all pending data and cancel the corking. Socket is locked. | |
654 | */ | |
655 | void udp_flush_pending_frames(struct sock *sk) | |
656 | { | |
657 | struct udp_sock *up = udp_sk(sk); | |
658 | ||
659 | if (up->pending) { | |
660 | up->len = 0; | |
661 | up->pending = 0; | |
662 | ip_flush_pending_frames(sk); | |
663 | } | |
664 | } | |
665 | EXPORT_SYMBOL(udp_flush_pending_frames); | |
666 | ||
667 | /** | |
668 | * udp4_hwcsum_outgoing - handle outgoing HW checksumming | |
669 | * @sk: socket we are sending on | |
670 | * @skb: sk_buff containing the filled-in UDP header | |
671 | * (checksum field must be zeroed out) | |
672 | */ | |
673 | static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | |
674 | __be32 src, __be32 dst, int len) | |
675 | { | |
676 | unsigned int offset; | |
677 | struct udphdr *uh = udp_hdr(skb); | |
678 | __wsum csum = 0; | |
679 | ||
680 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | |
681 | /* | |
682 | * Only one fragment on the socket. | |
683 | */ | |
684 | skb->csum_start = skb_transport_header(skb) - skb->head; | |
685 | skb->csum_offset = offsetof(struct udphdr, check); | |
686 | uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); | |
687 | } else { | |
688 | /* | |
689 | * HW checksumming won't work here: there are two or more | |
690 | * fragments on the socket's write queue, so their csums | |
691 | * have to be combined in software. | |
692 | */ | |
693 | offset = skb_transport_offset(skb); | |
694 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | |
695 | ||
696 | skb->ip_summed = CHECKSUM_NONE; | |
697 | ||
698 | skb_queue_walk(&sk->sk_write_queue, skb) { | |
699 | csum = csum_add(csum, skb->csum); | |
700 | } | |
701 | ||
702 | uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); | |
703 | if (uh->check == 0) | |
704 | uh->check = CSUM_MANGLED_0; | |
705 | } | |
706 | } | |
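/*
 * Illustrative sketch (not part of this file, not the kernel helpers):
 * csum_tcpudp_magic() above folds the RFC 768 pseudo-header (addresses,
 * protocol, UDP length) into the checksum.  A plain C version of the same
 * 16-bit one's-complement sum, for reference; addresses and the UDP block
 * are expected in network byte order.
 */
#include <stdint.h>
#include <string.h>

static uint32_t csum_add16(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += (uint32_t)(p[0] << 8 | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)					/* pad trailing odd byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const void *udp, size_t udp_len)
{
	uint8_t pseudo[12];
	uint32_t sum;

	memcpy(pseudo, &saddr, 4);			/* source address */
	memcpy(pseudo + 4, &daddr, 4);			/* destination address */
	pseudo[8] = 0;					/* zero pad */
	pseudo[9] = 17;					/* IPPROTO_UDP */
	pseudo[10] = (udp_len >> 8) & 0xff;		/* UDP length */
	pseudo[11] = udp_len & 0xff;

	sum = csum_add16(0, pseudo, sizeof(pseudo));
	sum = csum_add16(sum, udp, udp_len);
	while (sum >> 16)				/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	return sum ? sum : 0xffff;			/* 0 is sent as 0xffff */
}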
707 | ||
708 | /* | |
709 | * Push out all pending data as one UDP datagram. Socket is locked. | |
710 | */ | |
711 | static int udp_push_pending_frames(struct sock *sk) | |
712 | { | |
713 | struct udp_sock *up = udp_sk(sk); | |
714 | struct inet_sock *inet = inet_sk(sk); | |
715 | struct flowi *fl = &inet->cork.fl; | |
716 | struct sk_buff *skb; | |
717 | struct udphdr *uh; | |
718 | int err = 0; | |
719 | int is_udplite = IS_UDPLITE(sk); | |
720 | __wsum csum = 0; | |
721 | ||
722 | /* Grab the skbuff where UDP header space exists. */ | |
723 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | |
724 | goto out; | |
725 | ||
726 | /* | |
727 | * Create a UDP header | |
728 | */ | |
729 | uh = udp_hdr(skb); | |
730 | uh->source = fl->fl_ip_sport; | |
731 | uh->dest = fl->fl_ip_dport; | |
732 | uh->len = htons(up->len); | |
733 | uh->check = 0; | |
734 | ||
735 | if (is_udplite) /* UDP-Lite */ | |
736 | csum = udplite_csum_outgoing(sk, skb); | |
737 | ||
738 | else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ | |
739 | ||
740 | skb->ip_summed = CHECKSUM_NONE; | |
741 | goto send; | |
742 | ||
743 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ | |
744 | ||
745 | udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len); | |
746 | goto send; | |
747 | ||
748 | } else /* `normal' UDP */ | |
749 | csum = udp_csum_outgoing(sk, skb); | |
750 | ||
751 | /* add protocol-dependent pseudo-header */ | |
752 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, | |
753 | sk->sk_protocol, csum); | |
754 | if (uh->check == 0) | |
755 | uh->check = CSUM_MANGLED_0; | |
756 | ||
757 | send: | |
758 | err = ip_push_pending_frames(sk); | |
759 | if (err) { | |
760 | if (err == -ENOBUFS && !inet->recverr) { | |
761 | UDP_INC_STATS_USER(sock_net(sk), | |
762 | UDP_MIB_SNDBUFERRORS, is_udplite); | |
763 | err = 0; | |
764 | } | |
765 | } else | |
766 | UDP_INC_STATS_USER(sock_net(sk), | |
767 | UDP_MIB_OUTDATAGRAMS, is_udplite); | |
768 | out: | |
769 | up->len = 0; | |
770 | up->pending = 0; | |
771 | return err; | |
772 | } | |
773 | ||
774 | int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
775 | size_t len) | |
776 | { | |
777 | struct inet_sock *inet = inet_sk(sk); | |
778 | struct udp_sock *up = udp_sk(sk); | |
779 | int ulen = len; | |
780 | struct ipcm_cookie ipc; | |
781 | struct rtable *rt = NULL; | |
782 | int free = 0; | |
783 | int connected = 0; | |
784 | __be32 daddr, faddr, saddr; | |
785 | __be16 dport; | |
786 | u8 tos; | |
787 | int err, is_udplite = IS_UDPLITE(sk); | |
788 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | |
789 | int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); | |
790 | ||
791 | if (len > 0xFFFF) | |
792 | return -EMSGSIZE; | |
793 | ||
794 | /* | |
795 | * Check the flags. | |
796 | */ | |
797 | ||
798 | if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ | |
799 | return -EOPNOTSUPP; | |
800 | ||
801 | ipc.opt = NULL; | |
802 | ipc.shtx.flags = 0; | |
803 | ||
804 | if (up->pending) { | |
805 | /* | |
806 | * There are pending frames. | |
807 | * The socket lock must be held while it's corked. | |
808 | */ | |
809 | lock_sock(sk); | |
810 | if (likely(up->pending)) { | |
811 | if (unlikely(up->pending != AF_INET)) { | |
812 | release_sock(sk); | |
813 | return -EINVAL; | |
814 | } | |
815 | goto do_append_data; | |
816 | } | |
817 | release_sock(sk); | |
818 | } | |
819 | ulen += sizeof(struct udphdr); | |
820 | ||
821 | /* | |
822 | * Get and verify the address. | |
823 | */ | |
824 | if (msg->msg_name) { | |
825 | struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; | |
826 | if (msg->msg_namelen < sizeof(*usin)) | |
827 | return -EINVAL; | |
828 | if (usin->sin_family != AF_INET) { | |
829 | if (usin->sin_family != AF_UNSPEC) | |
830 | return -EAFNOSUPPORT; | |
831 | } | |
832 | ||
833 | daddr = usin->sin_addr.s_addr; | |
834 | dport = usin->sin_port; | |
835 | if (dport == 0) | |
836 | return -EINVAL; | |
837 | } else { | |
838 | if (sk->sk_state != TCP_ESTABLISHED) | |
839 | return -EDESTADDRREQ; | |
840 | daddr = inet->inet_daddr; | |
841 | dport = inet->inet_dport; | |
842 | /* Open fast path for connected socket. | |
843 | Route will not be used, if at least one option is set. | |
844 | */ | |
845 | connected = 1; | |
846 | } | |
847 | ipc.addr = inet->inet_saddr; | |
848 | ||
849 | ipc.oif = sk->sk_bound_dev_if; | |
850 | err = sock_tx_timestamp(msg, sk, &ipc.shtx); | |
851 | if (err) | |
852 | return err; | |
853 | if (msg->msg_controllen) { | |
854 | err = ip_cmsg_send(sock_net(sk), msg, &ipc); | |
855 | if (err) | |
856 | return err; | |
857 | if (ipc.opt) | |
858 | free = 1; | |
859 | connected = 0; | |
860 | } | |
861 | if (!ipc.opt) | |
862 | ipc.opt = inet->opt; | |
863 | ||
864 | saddr = ipc.addr; | |
865 | ipc.addr = faddr = daddr; | |
866 | ||
867 | if (ipc.opt && ipc.opt->srr) { | |
868 | if (!daddr) | |
869 | return -EINVAL; | |
870 | faddr = ipc.opt->faddr; | |
871 | connected = 0; | |
872 | } | |
873 | tos = RT_TOS(inet->tos); | |
874 | if (sock_flag(sk, SOCK_LOCALROUTE) || | |
875 | (msg->msg_flags & MSG_DONTROUTE) || | |
876 | (ipc.opt && ipc.opt->is_strictroute)) { | |
877 | tos |= RTO_ONLINK; | |
878 | connected = 0; | |
879 | } | |
880 | ||
881 | if (ipv4_is_multicast(daddr)) { | |
882 | if (!ipc.oif) | |
883 | ipc.oif = inet->mc_index; | |
884 | if (!saddr) | |
885 | saddr = inet->mc_addr; | |
886 | connected = 0; | |
887 | } | |
888 | ||
889 | if (connected) | |
890 | rt = (struct rtable *)sk_dst_check(sk, 0); | |
891 | ||
892 | if (rt == NULL) { | |
893 | struct flowi fl = { .oif = ipc.oif, | |
894 | .mark = sk->sk_mark, | |
895 | .nl_u = { .ip4_u = | |
896 | { .daddr = faddr, | |
897 | .saddr = saddr, | |
898 | .tos = tos } }, | |
899 | .proto = sk->sk_protocol, | |
900 | .flags = inet_sk_flowi_flags(sk), | |
901 | .uli_u = { .ports = | |
902 | { .sport = inet->inet_sport, | |
903 | .dport = dport } } }; | |
904 | struct net *net = sock_net(sk); | |
905 | ||
906 | security_sk_classify_flow(sk, &fl); | |
907 | err = ip_route_output_flow(net, &rt, &fl, sk, 1); | |
908 | if (err) { | |
909 | if (err == -ENETUNREACH) | |
910 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | |
911 | goto out; | |
912 | } | |
913 | ||
914 | err = -EACCES; | |
915 | if ((rt->rt_flags & RTCF_BROADCAST) && | |
916 | !sock_flag(sk, SOCK_BROADCAST)) | |
917 | goto out; | |
918 | if (connected) | |
919 | sk_dst_set(sk, dst_clone(&rt->u.dst)); | |
920 | } | |
921 | ||
922 | if (msg->msg_flags&MSG_CONFIRM) | |
923 | goto do_confirm; | |
924 | back_from_confirm: | |
925 | ||
926 | saddr = rt->rt_src; | |
927 | if (!ipc.addr) | |
928 | daddr = ipc.addr = rt->rt_dst; | |
929 | ||
930 | lock_sock(sk); | |
931 | if (unlikely(up->pending)) { | |
932 | /* The socket is already corked while preparing it. */ | |
933 | /* ... which is an evident application bug. --ANK */ | |
934 | release_sock(sk); | |
935 | ||
936 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); | |
937 | err = -EINVAL; | |
938 | goto out; | |
939 | } | |
940 | /* | |
941 | * Now cork the socket to pend data. | |
942 | */ | |
943 | inet->cork.fl.fl4_dst = daddr; | |
944 | inet->cork.fl.fl_ip_dport = dport; | |
945 | inet->cork.fl.fl4_src = saddr; | |
946 | inet->cork.fl.fl_ip_sport = inet->inet_sport; | |
947 | up->pending = AF_INET; | |
948 | ||
949 | do_append_data: | |
950 | up->len += ulen; | |
951 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; | |
952 | err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, | |
953 | sizeof(struct udphdr), &ipc, &rt, | |
954 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | |
955 | if (err) | |
956 | udp_flush_pending_frames(sk); | |
957 | else if (!corkreq) | |
958 | err = udp_push_pending_frames(sk); | |
959 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) | |
960 | up->pending = 0; | |
961 | release_sock(sk); | |
962 | ||
963 | out: | |
964 | ip_rt_put(rt); | |
965 | if (free) | |
966 | kfree(ipc.opt); | |
967 | if (!err) | |
968 | return len; | |
969 | /* | |
970 | * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting | |
971 | * ENOBUFS might not be good (it's not tunable per se), but otherwise | |
972 | * we don't have a good statistic (IpOutDiscards but it can be too many | |
973 | * things). We could add another new stat but at least for now that | |
974 | * seems like overkill. | |
975 | */ | |
976 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | |
977 | UDP_INC_STATS_USER(sock_net(sk), | |
978 | UDP_MIB_SNDBUFERRORS, is_udplite); | |
979 | } | |
980 | return err; | |
981 | ||
982 | do_confirm: | |
983 | dst_confirm(&rt->u.dst); | |
984 | if (!(msg->msg_flags&MSG_PROBE) || len) | |
985 | goto back_from_confirm; | |
986 | err = 0; | |
987 | goto out; | |
988 | } | |
989 | EXPORT_SYMBOL(udp_sendmsg); | |
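/*
 * Illustrative user-space sketch (not part of this file, minimal error
 * handling): with UDP_CORK set, the append path above keeps accumulating
 * data and udp_push_pending_frames() only runs when the cork is released,
 * so several send() calls leave the host as a single datagram.
 */
#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_CORK (or <linux/udp.h> on older libcs) */
#include <sys/socket.h>

static int send_corked(int fd, const void *hdr, size_t hlen,
		       const void *payload, size_t plen)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	if (send(fd, hdr, hlen, 0) < 0 || send(fd, payload, plen, 0) < 0)
		return -1;
	/* releasing the cork pushes the pending frames as one datagram */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}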
990 | ||
991 | int udp_sendpage(struct sock *sk, struct page *page, int offset, | |
992 | size_t size, int flags) | |
993 | { | |
994 | struct udp_sock *up = udp_sk(sk); | |
995 | int ret; | |
996 | ||
997 | if (!up->pending) { | |
998 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; | |
999 | ||
1000 | /* Call udp_sendmsg to specify destination address which | |
1001 | * sendpage interface can't pass. | |
1002 | * This will succeed only when the socket is connected. | |
1003 | */ | |
1004 | ret = udp_sendmsg(NULL, sk, &msg, 0); | |
1005 | if (ret < 0) | |
1006 | return ret; | |
1007 | } | |
1008 | ||
1009 | lock_sock(sk); | |
1010 | ||
1011 | if (unlikely(!up->pending)) { | |
1012 | release_sock(sk); | |
1013 | ||
1014 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); | |
1015 | return -EINVAL; | |
1016 | } | |
1017 | ||
1018 | ret = ip_append_page(sk, page, offset, size, flags); | |
1019 | if (ret == -EOPNOTSUPP) { | |
1020 | release_sock(sk); | |
1021 | return sock_no_sendpage(sk->sk_socket, page, offset, | |
1022 | size, flags); | |
1023 | } | |
1024 | if (ret < 0) { | |
1025 | udp_flush_pending_frames(sk); | |
1026 | goto out; | |
1027 | } | |
1028 | ||
1029 | up->len += size; | |
1030 | if (!(up->corkflag || (flags&MSG_MORE))) | |
1031 | ret = udp_push_pending_frames(sk); | |
1032 | if (!ret) | |
1033 | ret = size; | |
1034 | out: | |
1035 | release_sock(sk); | |
1036 | return ret; | |
1037 | } | |
1038 | ||
1039 | ||
1040 | /** | |
1041 | * first_packet_length - return length of first packet in receive queue | |
1042 | * @sk: socket | |
1043 | * | |
1044 | * Drops all bad checksum frames until a valid one is found. | |
1045 | * Returns the length of found skb, or 0 if none is found. | |
1046 | */ | |
1047 | static unsigned int first_packet_length(struct sock *sk) | |
1048 | { | |
1049 | struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; | |
1050 | struct sk_buff *skb; | |
1051 | unsigned int res; | |
1052 | ||
1053 | __skb_queue_head_init(&list_kill); | |
1054 | ||
1055 | spin_lock_bh(&rcvq->lock); | |
1056 | while ((skb = skb_peek(rcvq)) != NULL && | |
1057 | udp_lib_checksum_complete(skb)) { | |
1058 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | |
1059 | IS_UDPLITE(sk)); | |
1060 | atomic_inc(&sk->sk_drops); | |
1061 | __skb_unlink(skb, rcvq); | |
1062 | __skb_queue_tail(&list_kill, skb); | |
1063 | } | |
1064 | res = skb ? skb->len : 0; | |
1065 | spin_unlock_bh(&rcvq->lock); | |
1066 | ||
1067 | if (!skb_queue_empty(&list_kill)) { | |
1068 | bool slow = lock_sock_fast(sk); | |
1069 | ||
1070 | __skb_queue_purge(&list_kill); | |
1071 | sk_mem_reclaim_partial(sk); | |
1072 | unlock_sock_fast(sk, slow); | |
1073 | } | |
1074 | return res; | |
1075 | } | |
1076 | ||
1077 | /* | |
1078 | * IOCTL requests applicable to the UDP protocol | |
1079 | */ | |
1080 | ||
1081 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |
1082 | { | |
1083 | switch (cmd) { | |
1084 | case SIOCOUTQ: | |
1085 | { | |
1086 | int amount = sk_wmem_alloc_get(sk); | |
1087 | ||
1088 | return put_user(amount, (int __user *)arg); | |
1089 | } | |
1090 | ||
1091 | case SIOCINQ: | |
1092 | { | |
1093 | unsigned int amount = first_packet_length(sk); | |
1094 | ||
1095 | if (amount) | |
1096 | /* | |
1097 | * We will only return the amount | |
1098 | * of this packet since that is all | |
1099 | * that will be read. | |
1100 | */ | |
1101 | amount -= sizeof(struct udphdr); | |
1102 | ||
1103 | return put_user(amount, (int __user *)arg); | |
1104 | } | |
1105 | ||
1106 | default: | |
1107 | return -ENOIOCTLCMD; | |
1108 | } | |
1109 | ||
1110 | return 0; | |
1111 | } | |
1112 | EXPORT_SYMBOL(udp_ioctl); | |
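/*
 * Illustrative user-space sketch (not part of this file): SIOCINQ returns
 * the payload length of the next queued datagram (UDP header already
 * subtracted above), SIOCOUTQ the bytes still queued for transmit.
 */
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */
#include <stdio.h>
#include <sys/ioctl.h>

static void report_udp_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)
		printf("next datagram payload: %d bytes\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("queued for transmit: %d bytes\n", outq);
}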
1113 | ||
1114 | /* | |
1115 | * This should be easy: if there is something there, we | |
1116 | * return it; otherwise we block. | |
1117 | */ | |
1118 | ||
1119 | int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
1120 | size_t len, int noblock, int flags, int *addr_len) | |
1121 | { | |
1122 | struct inet_sock *inet = inet_sk(sk); | |
1123 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | |
1124 | struct sk_buff *skb; | |
1125 | unsigned int ulen; | |
1126 | int peeked; | |
1127 | int err; | |
1128 | int is_udplite = IS_UDPLITE(sk); | |
1129 | bool slow; | |
1130 | ||
1131 | /* | |
1132 | * Check any passed addresses | |
1133 | */ | |
1134 | if (addr_len) | |
1135 | *addr_len = sizeof(*sin); | |
1136 | ||
1137 | if (flags & MSG_ERRQUEUE) | |
1138 | return ip_recv_error(sk, msg, len); | |
1139 | ||
1140 | try_again: | |
1141 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | |
1142 | &peeked, &err); | |
1143 | if (!skb) | |
1144 | goto out; | |
1145 | ||
1146 | ulen = skb->len - sizeof(struct udphdr); | |
1147 | if (len > ulen) | |
1148 | len = ulen; | |
1149 | else if (len < ulen) | |
1150 | msg->msg_flags |= MSG_TRUNC; | |
1151 | ||
1152 | /* | |
1153 | * If checksum is needed at all, try to do it while copying the | |
1154 | * data. If the data is truncated, or if we only want a partial | |
1155 | * coverage checksum (UDP-Lite), do it before the copy. | |
1156 | */ | |
1157 | ||
1158 | if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { | |
1159 | if (udp_lib_checksum_complete(skb)) | |
1160 | goto csum_copy_err; | |
1161 | } | |
1162 | ||
1163 | if (skb_csum_unnecessary(skb)) | |
1164 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | |
1165 | msg->msg_iov, len); | |
1166 | else { | |
1167 | err = skb_copy_and_csum_datagram_iovec(skb, | |
1168 | sizeof(struct udphdr), | |
1169 | msg->msg_iov); | |
1170 | ||
1171 | if (err == -EINVAL) | |
1172 | goto csum_copy_err; | |
1173 | } | |
1174 | ||
1175 | if (err) | |
1176 | goto out_free; | |
1177 | ||
1178 | if (!peeked) | |
1179 | UDP_INC_STATS_USER(sock_net(sk), | |
1180 | UDP_MIB_INDATAGRAMS, is_udplite); | |
1181 | ||
1182 | sock_recv_ts_and_drops(msg, sk, skb); | |
1183 | ||
1184 | /* Copy the address. */ | |
1185 | if (sin) { | |
1186 | sin->sin_family = AF_INET; | |
1187 | sin->sin_port = udp_hdr(skb)->source; | |
1188 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | |
1189 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | |
1190 | } | |
1191 | if (inet->cmsg_flags) | |
1192 | ip_cmsg_recv(msg, skb); | |
1193 | ||
1194 | err = len; | |
1195 | if (flags & MSG_TRUNC) | |
1196 | err = ulen; | |
1197 | ||
1198 | out_free: | |
1199 | skb_free_datagram_locked(sk, skb); | |
1200 | out: | |
1201 | return err; | |
1202 | ||
1203 | csum_copy_err: | |
1204 | slow = lock_sock_fast(sk); | |
1205 | if (!skb_kill_datagram(sk, skb, flags)) | |
1206 | UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | |
1207 | unlock_sock_fast(sk, slow); | |
1208 | ||
1209 | if (noblock) | |
1210 | return -EAGAIN; | |
1211 | goto try_again; | |
1212 | } | |
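/*
 * Illustrative user-space sketch (not part of this file): udp_recvmsg()
 * above returns the full datagram length when MSG_TRUNC is passed, so a
 * MSG_PEEK | MSG_TRUNC probe can size the buffer before the real read.
 * Caller frees *buf.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t recv_whole_datagram(int fd, void **buf)
{
	ssize_t len = recv(fd, NULL, 0, MSG_PEEK | MSG_TRUNC);

	if (len < 0)
		return -1;
	*buf = malloc(len ? len : 1);
	if (!*buf)
		return -1;
	return recv(fd, *buf, len, 0);
}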
1213 | ||
1214 | ||
1215 | int udp_disconnect(struct sock *sk, int flags) | |
1216 | { | |
1217 | struct inet_sock *inet = inet_sk(sk); | |
1218 | /* | |
1219 | * 1003.1g - break association. | |
1220 | */ | |
1221 | ||
1222 | sk->sk_state = TCP_CLOSE; | |
1223 | inet->inet_daddr = 0; | |
1224 | inet->inet_dport = 0; | |
1225 | sock_rps_save_rxhash(sk, 0); | |
1226 | sk->sk_bound_dev_if = 0; | |
1227 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
1228 | inet_reset_saddr(sk); | |
1229 | ||
1230 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { | |
1231 | sk->sk_prot->unhash(sk); | |
1232 | inet->inet_sport = 0; | |
1233 | } | |
1234 | sk_dst_reset(sk); | |
1235 | return 0; | |
1236 | } | |
1237 | EXPORT_SYMBOL(udp_disconnect); | |
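/*
 * Illustrative user-space sketch (not part of this file): connecting a UDP
 * socket to an AF_UNSPEC address is what reaches udp_disconnect() above
 * and dissolves the association (1003.1g).
 */
#include <string.h>
#include <sys/socket.h>

static int udp_dissolve_association(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}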
1238 | ||
1239 | void udp_lib_unhash(struct sock *sk) | |
1240 | { | |
1241 | if (sk_hashed(sk)) { | |
1242 | struct udp_table *udptable = sk->sk_prot->h.udp_table; | |
1243 | struct udp_hslot *hslot, *hslot2; | |
1244 | ||
1245 | hslot = udp_hashslot(udptable, sock_net(sk), | |
1246 | udp_sk(sk)->udp_port_hash); | |
1247 | hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); | |
1248 | ||
1249 | spin_lock_bh(&hslot->lock); | |
1250 | if (sk_nulls_del_node_init_rcu(sk)) { | |
1251 | hslot->count--; | |
1252 | inet_sk(sk)->inet_num = 0; | |
1253 | sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); | |
1254 | ||
1255 | spin_lock(&hslot2->lock); | |
1256 | hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); | |
1257 | hslot2->count--; | |
1258 | spin_unlock(&hslot2->lock); | |
1259 | } | |
1260 | spin_unlock_bh(&hslot->lock); | |
1261 | } | |
1262 | } | |
1263 | EXPORT_SYMBOL(udp_lib_unhash); | |
1264 | ||
1265 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |
1266 | { | |
1267 | int rc; | |
1268 | ||
1269 | if (inet_sk(sk)->inet_daddr) | |
1270 | sock_rps_save_rxhash(sk, skb->rxhash); | |
1271 | ||
1272 | rc = ip_queue_rcv_skb(sk, skb); | |
1273 | if (rc < 0) { | |
1274 | int is_udplite = IS_UDPLITE(sk); | |
1275 | ||
1276 | /* Note that an ENOMEM error is charged twice */ | |
1277 | if (rc == -ENOMEM) | |
1278 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | |
1279 | is_udplite); | |
1280 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | |
1281 | kfree_skb(skb); | |
1282 | return -1; | |
1283 | } | |
1284 | ||
1285 | return 0; | |
1286 | ||
1287 | } | |
1288 | ||
1289 | /* returns: | |
1290 | * -1: error | |
1291 | * 0: success | |
1292 | * >0: "udp encap" protocol resubmission | |
1293 | * | |
1294 | * Note that in the success and error cases, the skb is assumed to | |
1295 | * have either been requeued or freed. | |
1296 | */ | |
1297 | int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |
1298 | { | |
1299 | struct udp_sock *up = udp_sk(sk); | |
1300 | int rc; | |
1301 | int is_udplite = IS_UDPLITE(sk); | |
1302 | ||
1303 | /* | |
1304 | * Charge it to the socket, dropping if the queue is full. | |
1305 | */ | |
1306 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | |
1307 | goto drop; | |
1308 | nf_reset(skb); | |
1309 | ||
1310 | if (up->encap_type) { | |
1311 | /* | |
1312 | * This is an encapsulation socket so pass the skb to | |
1313 | * the socket's udp_encap_rcv() hook. Otherwise, just | |
1314 | * fall through and pass this up the UDP socket. | |
1315 | * up->encap_rcv() returns the following value: | |
1316 | * =0 if skb was successfully passed to the encap | |
1317 | * handler or was discarded by it. | |
1318 | * >0 if skb should be passed on to UDP. | |
1319 | * <0 if skb should be resubmitted as proto -N | |
1320 | */ | |
1321 | ||
1322 | /* if we're overly short, let UDP handle it */ | |
1323 | if (skb->len > sizeof(struct udphdr) && | |
1324 | up->encap_rcv != NULL) { | |
1325 | int ret; | |
1326 | ||
1327 | ret = (*up->encap_rcv)(sk, skb); | |
1328 | if (ret <= 0) { | |
1329 | UDP_INC_STATS_BH(sock_net(sk), | |
1330 | UDP_MIB_INDATAGRAMS, | |
1331 | is_udplite); | |
1332 | return -ret; | |
1333 | } | |
1334 | } | |
1335 | ||
1336 | /* FALLTHROUGH -- it's a UDP Packet */ | |
1337 | } | |
1338 | ||
1339 | /* | |
1340 | * UDP-Lite specific tests, ignored on UDP sockets | |
1341 | */ | |
1342 | if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { | |
1343 | ||
1344 | /* | |
1345 | * MIB statistics other than incrementing the error count are | |
1346 | * disabled for the following two types of errors: these depend | |
1347 | * on the application settings, not on the functioning of the | |
1348 | * protocol stack as such. | |
1349 | * | |
1350 | * RFC 3828 here recommends (sec 3.3): "There should also be a | |
1351 | * way ... to ... at least let the receiving application block | |
1352 | * delivery of packets with coverage values less than a value | |
1353 | * provided by the application." | |
1354 | */ | |
1355 | if (up->pcrlen == 0) { /* full coverage was set */ | |
1356 | LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " | |
1357 | "%d while full coverage %d requested\n", | |
1358 | UDP_SKB_CB(skb)->cscov, skb->len); | |
1359 | goto drop; | |
1360 | } | |
1361 | /* The next case involves violating the min. coverage requested | |
1362 | * by the receiver. This is subtle: if receiver wants x and x is | |
1363 | * greater than the buffersize/MTU then receiver will complain | |
1364 | * that it wants x while sender emits packets of smaller size y. | |
1365 | * Therefore the above ...()->partial_cov statement is essential. | |
1366 | */ | |
1367 | if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { | |
1368 | LIMIT_NETDEBUG(KERN_WARNING | |
1369 | "UDPLITE: coverage %d too small, need min %d\n", | |
1370 | UDP_SKB_CB(skb)->cscov, up->pcrlen); | |
1371 | goto drop; | |
1372 | } | |
1373 | } | |
1374 | ||
1375 | if (sk->sk_filter) { | |
1376 | if (udp_lib_checksum_complete(skb)) | |
1377 | goto drop; | |
1378 | } | |
1379 | ||
1380 | ||
1381 | if (sk_rcvqueues_full(sk, skb)) | |
1382 | goto drop; | |
1383 | ||
1384 | rc = 0; | |
1385 | ||
1386 | bh_lock_sock(sk); | |
1387 | if (!sock_owned_by_user(sk)) | |
1388 | rc = __udp_queue_rcv_skb(sk, skb); | |
1389 | else if (sk_add_backlog(sk, skb)) { | |
1390 | bh_unlock_sock(sk); | |
1391 | goto drop; | |
1392 | } | |
1393 | bh_unlock_sock(sk); | |
1394 | ||
1395 | return rc; | |
1396 | ||
1397 | drop: | |
1398 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); | |
1399 | atomic_inc(&sk->sk_drops); | |
1400 | kfree_skb(skb); | |
1401 | return -1; | |
1402 | } | |
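/*
 * Hedged kernel-side sketch (hypothetical tunnel driver, not part of this
 * file): a module holding a UDP socket can hook the encapsulation path
 * above by setting encap_type and encap_rcv, following the return-value
 * convention documented there (0: consumed, >0: fall through to UDP,
 * <0: resubmit as proto -N).
 */
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* not a tunnel frame: let regular UDP processing see it */
	if (skb->len < sizeof(struct udphdr) + 4)
		return 1;

	/* ... decapsulate and hand the inner packet to the tunnel ... */
	kfree_skb(skb);
	return 0;
}

static void example_setup_encap(struct sock *sk)
{
	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
	udp_sk(sk)->encap_rcv = example_encap_rcv;
}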
1403 | ||
1404 | ||
1405 | static void flush_stack(struct sock **stack, unsigned int count, | |
1406 | struct sk_buff *skb, unsigned int final) | |
1407 | { | |
1408 | unsigned int i; | |
1409 | struct sk_buff *skb1 = NULL; | |
1410 | struct sock *sk; | |
1411 | ||
1412 | for (i = 0; i < count; i++) { | |
1413 | sk = stack[i]; | |
1414 | if (likely(skb1 == NULL)) | |
1415 | skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); | |
1416 | ||
1417 | if (!skb1) { | |
1418 | atomic_inc(&sk->sk_drops); | |
1419 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, | |
1420 | IS_UDPLITE(sk)); | |
1421 | UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, | |
1422 | IS_UDPLITE(sk)); | |
1423 | } | |
1424 | ||
1425 | if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) | |
1426 | skb1 = NULL; | |
1427 | } | |
1428 | if (unlikely(skb1)) | |
1429 | kfree_skb(skb1); | |
1430 | } | |
1431 | ||
1432 | /* | |
1433 | * Multicasts and broadcasts go to each listener. | |
1434 | * | |
1435 | * Note: called only from the BH handler context. | |
1436 | */ | |
1437 | static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |
1438 | struct udphdr *uh, | |
1439 | __be32 saddr, __be32 daddr, | |
1440 | struct udp_table *udptable) | |
1441 | { | |
1442 | struct sock *sk, *stack[256 / sizeof(struct sock *)]; | |
1443 | struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); | |
1444 | int dif; | |
1445 | unsigned int i, count = 0; | |
1446 | ||
1447 | spin_lock(&hslot->lock); | |
1448 | sk = sk_nulls_head(&hslot->head); | |
1449 | dif = skb->dev->ifindex; | |
1450 | sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); | |
1451 | while (sk) { | |
1452 | stack[count++] = sk; | |
1453 | sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, | |
1454 | daddr, uh->source, saddr, dif); | |
1455 | if (unlikely(count == ARRAY_SIZE(stack))) { | |
1456 | if (!sk) | |
1457 | break; | |
1458 | flush_stack(stack, count, skb, ~0); | |
1459 | count = 0; | |
1460 | } | |
1461 | } | |
1462 | /* | |
1463 | * before releasing chain lock, we must take a reference on sockets | |
1464 | */ | |
1465 | for (i = 0; i < count; i++) | |
1466 | sock_hold(stack[i]); | |
1467 | ||
1468 | spin_unlock(&hslot->lock); | |
1469 | ||
1470 | /* | |
1471 | * do the slow work with no lock held | |
1472 | */ | |
1473 | if (count) { | |
1474 | flush_stack(stack, count, skb, count - 1); | |
1475 | ||
1476 | for (i = 0; i < count; i++) | |
1477 | sock_put(stack[i]); | |
1478 | } else { | |
1479 | kfree_skb(skb); | |
1480 | } | |
1481 | return 0; | |
1482 | } | |
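/*
 * Illustrative user-space sketch (not part of this file, minimal error
 * handling): a socket only matches the multicast delivery above after it
 * has bound the port and joined the group.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int join_mcast_group(const char *group, unsigned short port)
{
	struct sockaddr_in addr;
	struct ip_mreq mreq;
	int on = 1, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto fail;

	mreq.imr_multiaddr.s_addr = inet_addr(group);
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		goto fail;
	return fd;
fail:
	close(fd);
	return -1;
}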
1483 | ||
1484 | /* Initialize UDP checksum. If this exits with a zero value (success) and | |
1485 | * CHECKSUM_UNNECESSARY is set, no more checks are required. | |
1486 | * Otherwise, csum completion requires checksumming the packet body, | |
1487 | * including the udp header, and folding it into skb->csum. | |
1488 | */ | |
1489 | static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, | |
1490 | int proto) | |
1491 | { | |
1492 | const struct iphdr *iph; | |
1493 | int err; | |
1494 | ||
1495 | UDP_SKB_CB(skb)->partial_cov = 0; | |
1496 | UDP_SKB_CB(skb)->cscov = skb->len; | |
1497 | ||
1498 | if (proto == IPPROTO_UDPLITE) { | |
1499 | err = udplite_checksum_init(skb, uh); | |
1500 | if (err) | |
1501 | return err; | |
1502 | } | |
1503 | ||
1504 | iph = ip_hdr(skb); | |
1505 | if (uh->check == 0) { | |
1506 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1507 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { | |
1508 | if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, | |
1509 | proto, skb->csum)) | |
1510 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1511 | } | |
1512 | if (!skb_csum_unnecessary(skb)) | |
1513 | skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, | |
1514 | skb->len, proto, 0); | |
1515 | /* Probably, we should checksum udp header (it should be in cache | |
1516 | * in any case) and data in tiny packets (< rx copybreak). | |
1517 | */ | |
1518 | ||
1519 | return 0; | |
1520 | } | |
1521 | ||
1522 | /* | |
1523 | * All we need to do is get the socket, and then do a checksum. | |
1524 | */ | |
1525 | ||
1526 | int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |
1527 | int proto) | |
1528 | { | |
1529 | struct sock *sk; | |
1530 | struct udphdr *uh; | |
1531 | unsigned short ulen; | |
1532 | struct rtable *rt = skb_rtable(skb); | |
1533 | __be32 saddr, daddr; | |
1534 | struct net *net = dev_net(skb->dev); | |
1535 | ||
1536 | /* | |
1537 | * Validate the packet. | |
1538 | */ | |
1539 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) | |
1540 | goto drop; /* No space for header. */ | |
1541 | ||
1542 | uh = udp_hdr(skb); | |
1543 | ulen = ntohs(uh->len); | |
1544 | saddr = ip_hdr(skb)->saddr; | |
1545 | daddr = ip_hdr(skb)->daddr; | |
1546 | ||
1547 | if (ulen > skb->len) | |
1548 | goto short_packet; | |
1549 | ||
1550 | if (proto == IPPROTO_UDP) { | |
1551 | /* UDP validates ulen. */ | |
1552 | if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) | |
1553 | goto short_packet; | |
1554 | uh = udp_hdr(skb); | |
1555 | } | |
1556 | ||
1557 | if (udp4_csum_init(skb, uh, proto)) | |
1558 | goto csum_error; | |
1559 | ||
1560 | if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | |
1561 | return __udp4_lib_mcast_deliver(net, skb, uh, | |
1562 | saddr, daddr, udptable); | |
1563 | ||
1564 | sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); | |
1565 | ||
1566 | if (sk != NULL) { | |
1567 | int ret = udp_queue_rcv_skb(sk, skb); | |
1568 | sock_put(sk); | |
1569 | ||
1570 | /* a return value > 0 means to resubmit the input, but | |
1571 | * it wants the return to be -protocol, or 0 | |
1572 | */ | |
1573 | if (ret > 0) | |
1574 | return -ret; | |
1575 | return 0; | |
1576 | } | |
1577 | ||
1578 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | |
1579 | goto drop; | |
1580 | nf_reset(skb); | |
1581 | ||
1582 | /* No socket. Drop packet silently, if checksum is wrong */ | |
1583 | if (udp_lib_checksum_complete(skb)) | |
1584 | goto csum_error; | |
1585 | ||
1586 | UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); | |
1587 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | |
1588 | ||
1589 | /* | |
1590 | * Hmm. We got a UDP packet on a port to which we | |
1591 | * don't want to listen. Ignore it. | |
1592 | */ | |
1593 | kfree_skb(skb); | |
1594 | return 0; | |
1595 | ||
1596 | short_packet: | |
1597 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", | |
1598 | proto == IPPROTO_UDPLITE ? "-Lite" : "", | |
1599 | &saddr, | |
1600 | ntohs(uh->source), | |
1601 | ulen, | |
1602 | skb->len, | |
1603 | &daddr, | |
1604 | ntohs(uh->dest)); | |
1605 | goto drop; | |
1606 | ||
1607 | csum_error: | |
1608 | /* | |
1609 | * RFC1122: OK. Discards the bad packet silently (as far as | |
1610 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). | |
1611 | */ | |
1612 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", | |
1613 | proto == IPPROTO_UDPLITE ? "-Lite" : "", | |
1614 | &saddr, | |
1615 | ntohs(uh->source), | |
1616 | &daddr, | |
1617 | ntohs(uh->dest), | |
1618 | ulen); | |
1619 | drop: | |
1620 | UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); | |
1621 | kfree_skb(skb); | |
1622 | return 0; | |
1623 | } | |
1624 | ||
1625 | int udp_rcv(struct sk_buff *skb) | |
1626 | { | |
1627 | return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); | |
1628 | } | |
1629 | ||
1630 | void udp_destroy_sock(struct sock *sk) | |
1631 | { | |
1632 | bool slow = lock_sock_fast(sk); | |
1633 | udp_flush_pending_frames(sk); | |
1634 | unlock_sock_fast(sk, slow); | |
1635 | } | |
1636 | ||
1637 | /* | |
1638 | * Socket option code for UDP | |
1639 | */ | |
1640 | int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |
1641 | char __user *optval, unsigned int optlen, | |
1642 | int (*push_pending_frames)(struct sock *)) | |
1643 | { | |
1644 | struct udp_sock *up = udp_sk(sk); | |
1645 | int val; | |
1646 | int err = 0; | |
1647 | int is_udplite = IS_UDPLITE(sk); | |
1648 | ||
1649 | if (optlen < sizeof(int)) | |
1650 | return -EINVAL; | |
1651 | ||
1652 | if (get_user(val, (int __user *)optval)) | |
1653 | return -EFAULT; | |
1654 | ||
1655 | switch (optname) { | |
1656 | case UDP_CORK: | |
1657 | if (val != 0) { | |
1658 | up->corkflag = 1; | |
1659 | } else { | |
1660 | up->corkflag = 0; | |
1661 | lock_sock(sk); | |
1662 | (*push_pending_frames)(sk); | |
1663 | release_sock(sk); | |
1664 | } | |
1665 | break; | |
1666 | ||
1667 | case UDP_ENCAP: | |
1668 | switch (val) { | |
1669 | case 0: | |
1670 | case UDP_ENCAP_ESPINUDP: | |
1671 | case UDP_ENCAP_ESPINUDP_NON_IKE: | |
1672 | up->encap_rcv = xfrm4_udp_encap_rcv; | |
1673 | /* FALLTHROUGH */ | |
1674 | case UDP_ENCAP_L2TPINUDP: | |
1675 | up->encap_type = val; | |
1676 | break; | |
1677 | default: | |
1678 | err = -ENOPROTOOPT; | |
1679 | break; | |
1680 | } | |
1681 | break; | |
1682 | ||
1683 | /* | |
1684 | * UDP-Lite's partial checksum coverage (RFC 3828). | |
1685 | */ | |
1686 | /* The sender sets actual checksum coverage length via this option. | |
1687 | * The case coverage > packet length is handled by send module. */ | |
1688 | case UDPLITE_SEND_CSCOV: | |
1689 | if (!is_udplite) /* Disable the option on UDP sockets */ | |
1690 | return -ENOPROTOOPT; | |
1691 | if (val != 0 && val < 8) /* Coverage below the minimum: clamp to 8 */ | |
1692 | val = 8; | |
1693 | else if (val > USHRT_MAX) | |
1694 | val = USHRT_MAX; | |
1695 | up->pcslen = val; | |
1696 | up->pcflag |= UDPLITE_SEND_CC; | |
1697 | break; | |
1698 | ||
1699 | /* The receiver specifies a minimum checksum coverage value. To be | |
1700 | * meaningful, this should be set to at least 8 (as done below). If zero | |
1701 | * is used, this again means full checksum coverage. */ | |
1702 | case UDPLITE_RECV_CSCOV: | |
1703 | if (!is_udplite) /* Disable the option on UDP sockets */ | |
1704 | return -ENOPROTOOPT; | |
1705 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ | |
1706 | val = 8; | |
1707 | else if (val > USHRT_MAX) | |
1708 | val = USHRT_MAX; | |
1709 | up->pcrlen = val; | |
1710 | up->pcflag |= UDPLITE_RECV_CC; | |
1711 | break; | |
1712 | ||
1713 | default: | |
1714 | err = -ENOPROTOOPT; | |
1715 | break; | |
1716 | } | |
1717 | ||
1718 | return err; | |
1719 | } | |
1720 | EXPORT_SYMBOL(udp_lib_setsockopt); | |
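/*
 * Userspace sketch of the UDP-Lite coverage options handled above (a
 * hedged example; the fallback constants mirror <linux/udp.h> and the
 * IANA protocol number and are repeated here only for illustration).
 * The sender checksums just its own 12-byte application header, and the
 * receiver refuses datagrams that cover less than that.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE		136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV	10
#define UDPLITE_RECV_CSCOV	11
#endif

static int udplite_coverage_example(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	int cov = 8 + 12;	/* UDP-Lite header (8) + app header (12) */

	if (fd < 0)
		return -1;
	/* Sender: checksum only the first 20 bytes of each datagram. */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
	/* Receiver: drop datagrams whose coverage is smaller than 20. */
	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
	return fd;
}
#endif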
1721 | ||
1722 | int udp_setsockopt(struct sock *sk, int level, int optname, | |
1723 | char __user *optval, unsigned int optlen) | |
1724 | { | |
1725 | if (level == SOL_UDP || level == SOL_UDPLITE) | |
1726 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, | |
1727 | udp_push_pending_frames); | |
1728 | return ip_setsockopt(sk, level, optname, optval, optlen); | |
1729 | } | |
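/*
 * Userspace sketch of UDP_CORK as dispatched above (a hedged example;
 * the fallback define mirrors <linux/udp.h>): while the cork is set,
 * consecutive sends on a connected UDP socket are accumulated and leave
 * as a single datagram once the cork is released.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_CORK
#define UDP_CORK	1
#endif

static void corked_send_example(int fd)	/* fd: a connected UDP socket */
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "header-", 7, 0);	/* queued, not yet transmitted */
	send(fd, "payload", 7, 0);	/* appended to the pending frame */
	/* Releasing the cork emits one datagram carrying all 14 bytes. */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif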
1730 | ||
1731 | #ifdef CONFIG_COMPAT | |
1732 | int compat_udp_setsockopt(struct sock *sk, int level, int optname, | |
1733 | char __user *optval, unsigned int optlen) | |
1734 | { | |
1735 | if (level == SOL_UDP || level == SOL_UDPLITE) | |
1736 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, | |
1737 | udp_push_pending_frames); | |
1738 | return compat_ip_setsockopt(sk, level, optname, optval, optlen); | |
1739 | } | |
1740 | #endif | |
1741 | ||
1742 | int udp_lib_getsockopt(struct sock *sk, int level, int optname, | |
1743 | char __user *optval, int __user *optlen) | |
1744 | { | |
1745 | struct udp_sock *up = udp_sk(sk); | |
1746 | int val, len; | |
1747 | ||
1748 | if (get_user(len, optlen)) | |
1749 | return -EFAULT; | |
1750 | ||
1751 | len = min_t(unsigned int, len, sizeof(int)); | |
1752 | ||
1753 | if (len < 0) | |
1754 | return -EINVAL; | |
1755 | ||
1756 | switch (optname) { | |
1757 | case UDP_CORK: | |
1758 | val = up->corkflag; | |
1759 | break; | |
1760 | ||
1761 | case UDP_ENCAP: | |
1762 | val = up->encap_type; | |
1763 | break; | |
1764 | ||
1765 | /* The following two cannot be changed on UDP sockets; the return is | |
1766 | * always 0 (which corresponds to the full checksum coverage of UDP). */ | |
1767 | case UDPLITE_SEND_CSCOV: | |
1768 | val = up->pcslen; | |
1769 | break; | |
1770 | ||
1771 | case UDPLITE_RECV_CSCOV: | |
1772 | val = up->pcrlen; | |
1773 | break; | |
1774 | ||
1775 | default: | |
1776 | return -ENOPROTOOPT; | |
1777 | } | |
1778 | ||
1779 | if (put_user(len, optlen)) | |
1780 | return -EFAULT; | |
1781 | if (copy_to_user(optval, &val, len)) | |
1782 | return -EFAULT; | |
1783 | return 0; | |
1784 | } | |
1785 | EXPORT_SYMBOL(udp_lib_getsockopt); | |
1786 | ||
1787 | int udp_getsockopt(struct sock *sk, int level, int optname, | |
1788 | char __user *optval, int __user *optlen) | |
1789 | { | |
1790 | if (level == SOL_UDP || level == SOL_UDPLITE) | |
1791 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); | |
1792 | return ip_getsockopt(sk, level, optname, optval, optlen); | |
1793 | } | |
1794 | ||
1795 | #ifdef CONFIG_COMPAT | |
1796 | int compat_udp_getsockopt(struct sock *sk, int level, int optname, | |
1797 | char __user *optval, int __user *optlen) | |
1798 | { | |
1799 | if (level == SOL_UDP || level == SOL_UDPLITE) | |
1800 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); | |
1801 | return compat_ip_getsockopt(sk, level, optname, optval, optlen); | |
1802 | } | |
1803 | #endif | |
1804 | /** | |
1805 | * udp_poll - wait for a UDP event. | |
1806 | * @file: file struct | |
1807 | * @sock: socket | |
1808 | * @wait: poll table | |
1809 | * | |
1810 | * This is the same as datagram_poll(), except for the special case of | |
1811 | * blocking sockets.  If an application is using a blocking fd and a | |
1812 | * packet with a checksum error is in the queue, select() could report | |
1813 | * data as available, but the subsequent read would then block. | |
1814 | * Add special-case code to work around these arguably | |
1815 | * broken applications. | |
1816 | */ | |
1817 | unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |
1818 | { | |
1819 | unsigned int mask = datagram_poll(file, sock, wait); | |
1820 | struct sock *sk = sock->sk; | |
1821 | ||
1822 | /* Check for false positives due to checksum errors */ | |
1823 | if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && | |
1824 | !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) | |
1825 | mask &= ~(POLLIN | POLLRDNORM); | |
1826 | ||
1827 | return mask; | |
1828 | ||
1829 | } | |
1830 | EXPORT_SYMBOL(udp_poll); | |
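/*
 * Userspace view of the workaround above (hedged sketch): without the
 * extra check in udp_poll(), poll() could report POLLIN for a queue
 * whose only packet later fails its checksum, and the blocking recv()
 * below would then hang even though poll() claimed data was ready.
 */
#if 0	/* example only */
#include <poll.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t poll_then_read(int fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
		return -1;
	/* Relies on udp_poll() having filtered checksum false positives. */
	return recv(fd, buf, len, 0);
}
#endif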
1831 | ||
1832 | struct proto udp_prot = { | |
1833 | .name = "UDP", | |
1834 | .owner = THIS_MODULE, | |
1835 | .close = udp_lib_close, | |
1836 | .connect = ip4_datagram_connect, | |
1837 | .disconnect = udp_disconnect, | |
1838 | .ioctl = udp_ioctl, | |
1839 | .destroy = udp_destroy_sock, | |
1840 | .setsockopt = udp_setsockopt, | |
1841 | .getsockopt = udp_getsockopt, | |
1842 | .sendmsg = udp_sendmsg, | |
1843 | .recvmsg = udp_recvmsg, | |
1844 | .sendpage = udp_sendpage, | |
1845 | .backlog_rcv = __udp_queue_rcv_skb, | |
1846 | .hash = udp_lib_hash, | |
1847 | .unhash = udp_lib_unhash, | |
1848 | .get_port = udp_v4_get_port, | |
1849 | .memory_allocated = &udp_memory_allocated, | |
1850 | .sysctl_mem = sysctl_udp_mem, | |
1851 | .sysctl_wmem = &sysctl_udp_wmem_min, | |
1852 | .sysctl_rmem = &sysctl_udp_rmem_min, | |
1853 | .obj_size = sizeof(struct udp_sock), | |
1854 | .slab_flags = SLAB_DESTROY_BY_RCU, | |
1855 | .h.udp_table = &udp_table, | |
1856 | #ifdef CONFIG_COMPAT | |
1857 | .compat_setsockopt = compat_udp_setsockopt, | |
1858 | .compat_getsockopt = compat_udp_getsockopt, | |
1859 | #endif | |
1860 | }; | |
1861 | EXPORT_SYMBOL(udp_prot); | |
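/*
 * Illustrative sketch (an assumption based on the af_inet.c wiring of
 * this kernel generation): udp_prot is attached to SOCK_DGRAM sockets
 * through a struct inet_protosw entry, so socket(AF_INET, SOCK_DGRAM, 0)
 * ends up using the operations listed above.
 */
#if 0	/* example only */
static struct inet_protosw udp_protosw_example = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udp_prot,
	.ops		= &inet_dgram_ops,
	.no_check	= UDP_CSUM_DEFAULT,
	.flags		= INET_PROTOSW_PERMANENT,
};

/* Registered once during inet_init(): */
/* inet_register_protosw(&udp_protosw_example); */
#endif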
1862 | ||
1863 | /* ------------------------------------------------------------------------ */ | |
1864 | #ifdef CONFIG_PROC_FS | |
1865 | ||
1866 | static struct sock *udp_get_first(struct seq_file *seq, int start) | |
1867 | { | |
1868 | struct sock *sk; | |
1869 | struct udp_iter_state *state = seq->private; | |
1870 | struct net *net = seq_file_net(seq); | |
1871 | ||
1872 | for (state->bucket = start; state->bucket <= state->udp_table->mask; | |
1873 | ++state->bucket) { | |
1874 | struct hlist_nulls_node *node; | |
1875 | struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; | |
1876 | ||
1877 | if (hlist_nulls_empty(&hslot->head)) | |
1878 | continue; | |
1879 | ||
1880 | spin_lock_bh(&hslot->lock); | |
1881 | sk_nulls_for_each(sk, node, &hslot->head) { | |
1882 | if (!net_eq(sock_net(sk), net)) | |
1883 | continue; | |
1884 | if (sk->sk_family == state->family) | |
1885 | goto found; | |
1886 | } | |
1887 | spin_unlock_bh(&hslot->lock); | |
1888 | } | |
1889 | sk = NULL; | |
1890 | found: | |
1891 | return sk; | |
1892 | } | |
1893 | ||
1894 | static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) | |
1895 | { | |
1896 | struct udp_iter_state *state = seq->private; | |
1897 | struct net *net = seq_file_net(seq); | |
1898 | ||
1899 | do { | |
1900 | sk = sk_nulls_next(sk); | |
1901 | } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); | |
1902 | ||
1903 | if (!sk) { | |
1904 | if (state->bucket <= state->udp_table->mask) | |
1905 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | |
1906 | return udp_get_first(seq, state->bucket + 1); | |
1907 | } | |
1908 | return sk; | |
1909 | } | |
1910 | ||
1911 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | |
1912 | { | |
1913 | struct sock *sk = udp_get_first(seq, 0); | |
1914 | ||
1915 | if (sk) | |
1916 | while (pos && (sk = udp_get_next(seq, sk)) != NULL) | |
1917 | --pos; | |
1918 | return pos ? NULL : sk; | |
1919 | } | |
1920 | ||
1921 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) | |
1922 | { | |
1923 | struct udp_iter_state *state = seq->private; | |
1924 | state->bucket = MAX_UDP_PORTS; | |
1925 | ||
1926 | return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; | |
1927 | } | |
1928 | ||
1929 | static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1930 | { | |
1931 | struct sock *sk; | |
1932 | ||
1933 | if (v == SEQ_START_TOKEN) | |
1934 | sk = udp_get_idx(seq, 0); | |
1935 | else | |
1936 | sk = udp_get_next(seq, v); | |
1937 | ||
1938 | ++*pos; | |
1939 | return sk; | |
1940 | } | |
1941 | ||
1942 | static void udp_seq_stop(struct seq_file *seq, void *v) | |
1943 | { | |
1944 | struct udp_iter_state *state = seq->private; | |
1945 | ||
1946 | if (state->bucket <= state->udp_table->mask) | |
1947 | spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); | |
1948 | } | |
1949 | ||
1950 | static int udp_seq_open(struct inode *inode, struct file *file) | |
1951 | { | |
1952 | struct udp_seq_afinfo *afinfo = PDE(inode)->data; | |
1953 | struct udp_iter_state *s; | |
1954 | int err; | |
1955 | ||
1956 | err = seq_open_net(inode, file, &afinfo->seq_ops, | |
1957 | sizeof(struct udp_iter_state)); | |
1958 | if (err < 0) | |
1959 | return err; | |
1960 | ||
1961 | s = ((struct seq_file *)file->private_data)->private; | |
1962 | s->family = afinfo->family; | |
1963 | s->udp_table = afinfo->udp_table; | |
1964 | return err; | |
1965 | } | |
1966 | ||
1967 | /* ------------------------------------------------------------------------ */ | |
1968 | int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) | |
1969 | { | |
1970 | struct proc_dir_entry *p; | |
1971 | int rc = 0; | |
1972 | ||
1973 | afinfo->seq_fops.open = udp_seq_open; | |
1974 | afinfo->seq_fops.read = seq_read; | |
1975 | afinfo->seq_fops.llseek = seq_lseek; | |
1976 | afinfo->seq_fops.release = seq_release_net; | |
1977 | ||
1978 | afinfo->seq_ops.start = udp_seq_start; | |
1979 | afinfo->seq_ops.next = udp_seq_next; | |
1980 | afinfo->seq_ops.stop = udp_seq_stop; | |
1981 | ||
1982 | p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, | |
1983 | &afinfo->seq_fops, afinfo); | |
1984 | if (!p) | |
1985 | rc = -ENOMEM; | |
1986 | return rc; | |
1987 | } | |
1988 | EXPORT_SYMBOL(udp_proc_register); | |
1989 | ||
1990 | void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) | |
1991 | { | |
1992 | proc_net_remove(net, afinfo->name); | |
1993 | } | |
1994 | EXPORT_SYMBOL(udp_proc_unregister); | |
1995 | ||
1996 | /* ------------------------------------------------------------------------ */ | |
1997 | static void udp4_format_sock(struct sock *sp, struct seq_file *f, | |
1998 | int bucket, int *len) | |
1999 | { | |
2000 | struct inet_sock *inet = inet_sk(sp); | |
2001 | __be32 dest = inet->inet_daddr; | |
2002 | __be32 src = inet->inet_rcv_saddr; | |
2003 | __u16 destp = ntohs(inet->inet_dport); | |
2004 | __u16 srcp = ntohs(inet->inet_sport); | |
2005 | ||
2006 | seq_printf(f, "%5d: %08X:%04X %08X:%04X" | |
2007 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", | |
2008 | bucket, src, srcp, dest, destp, sp->sk_state, | |
2009 | sk_wmem_alloc_get(sp), | |
2010 | sk_rmem_alloc_get(sp), | |
2011 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | |
2012 | atomic_read(&sp->sk_refcnt), sp, | |
2013 | atomic_read(&sp->sk_drops), len); | |
2014 | } | |
2015 | ||
2016 | int udp4_seq_show(struct seq_file *seq, void *v) | |
2017 | { | |
2018 | if (v == SEQ_START_TOKEN) | |
2019 | seq_printf(seq, "%-127s\n", | |
2020 | " sl local_address rem_address st tx_queue " | |
2021 | "rx_queue tr tm->when retrnsmt uid timeout " | |
2022 | "inode ref pointer drops"); | |
2023 | else { | |
2024 | struct udp_iter_state *state = seq->private; | |
2025 | int len; | |
2026 | ||
2027 | udp4_format_sock(v, seq, state->bucket, &len); | |
2028 | seq_printf(seq, "%*s\n", 127 - len, ""); | |
2029 | } | |
2030 | return 0; | |
2031 | } | |
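/*
 * Userspace sketch (hedged): the seq_file operations above back
 * /proc/net/udp, so the table can simply be read as text: the header
 * line printed by udp4_seq_show() first, then one line per socket.
 */
#if 0	/* example only */
#include <stdio.h>

static void dump_udp_sockets(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/udp", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif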
2032 | ||
2033 | /* ------------------------------------------------------------------------ */ | |
2034 | static struct udp_seq_afinfo udp4_seq_afinfo = { | |
2035 | .name = "udp", | |
2036 | .family = AF_INET, | |
2037 | .udp_table = &udp_table, | |
2038 | .seq_fops = { | |
2039 | .owner = THIS_MODULE, | |
2040 | }, | |
2041 | .seq_ops = { | |
2042 | .show = udp4_seq_show, | |
2043 | }, | |
2044 | }; | |
2045 | ||
2046 | static int __net_init udp4_proc_init_net(struct net *net) | |
2047 | { | |
2048 | return udp_proc_register(net, &udp4_seq_afinfo); | |
2049 | } | |
2050 | ||
2051 | static void __net_exit udp4_proc_exit_net(struct net *net) | |
2052 | { | |
2053 | udp_proc_unregister(net, &udp4_seq_afinfo); | |
2054 | } | |
2055 | ||
2056 | static struct pernet_operations udp4_net_ops = { | |
2057 | .init = udp4_proc_init_net, | |
2058 | .exit = udp4_proc_exit_net, | |
2059 | }; | |
2060 | ||
2061 | int __init udp4_proc_init(void) | |
2062 | { | |
2063 | return register_pernet_subsys(&udp4_net_ops); | |
2064 | } | |
2065 | ||
2066 | void udp4_proc_exit(void) | |
2067 | { | |
2068 | unregister_pernet_subsys(&udp4_net_ops); | |
2069 | } | |
2070 | #endif /* CONFIG_PROC_FS */ | |
2071 | ||
2072 | static __initdata unsigned long uhash_entries; | |
2073 | static int __init set_uhash_entries(char *str) | |
2074 | { | |
2075 | if (!str) | |
2076 | return 0; | |
2077 | uhash_entries = simple_strtoul(str, &str, 0); | |
2078 | if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) | |
2079 | uhash_entries = UDP_HTABLE_SIZE_MIN; | |
2080 | return 1; | |
2081 | } | |
2082 | __setup("uhash_entries=", set_uhash_entries); | |
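/*
 * Example (hedged): the hash size can be requested on the kernel command
 * line, e.g. "uhash_entries=2048".  Non-zero values below
 * UDP_HTABLE_SIZE_MIN are rounded up by set_uhash_entries() above, and
 * leaving the parameter unset (or 0) keeps the automatic sizing done in
 * udp_table_init() below.
 */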
2083 | ||
2084 | void __init udp_table_init(struct udp_table *table, const char *name) | |
2085 | { | |
2086 | unsigned int i; | |
2087 | ||
2088 | if (!CONFIG_BASE_SMALL) | |
2089 | table->hash = alloc_large_system_hash(name, | |
2090 | 2 * sizeof(struct udp_hslot), | |
2091 | uhash_entries, | |
2092 | 21, /* one slot per 2 MB */ | |
2093 | 0, | |
2094 | &table->log, | |
2095 | &table->mask, | |
2096 | 64 * 1024); | |
2097 | /* | |
2098 | * Make sure hash table has the minimum size | |
2099 | */ | |
2100 | if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) { | |
2101 | table->hash = kmalloc(UDP_HTABLE_SIZE_MIN * | |
2102 | 2 * sizeof(struct udp_hslot), GFP_KERNEL); | |
2103 | if (!table->hash) | |
2104 | panic(name); | |
2105 | table->log = ilog2(UDP_HTABLE_SIZE_MIN); | |
2106 | table->mask = UDP_HTABLE_SIZE_MIN - 1; | |
2107 | } | |
2108 | table->hash2 = table->hash + (table->mask + 1); | |
2109 | for (i = 0; i <= table->mask; i++) { | |
2110 | INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); | |
2111 | table->hash[i].count = 0; | |
2112 | spin_lock_init(&table->hash[i].lock); | |
2113 | } | |
2114 | for (i = 0; i <= table->mask; i++) { | |
2115 | INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i); | |
2116 | table->hash2[i].count = 0; | |
2117 | spin_lock_init(&table->hash2[i].lock); | |
2118 | } | |
2119 | } | |
2120 | ||
2121 | void __init udp_init(void) | |
2122 | { | |
2123 | unsigned long nr_pages, limit; | |
2124 | ||
2125 | udp_table_init(&udp_table, "UDP"); | |
2126 | /* Set the pressure threshold using the same strategy as TCP. It is a | |
2127 | * fraction of global memory that is up to 1/2 at 256 MB, decreasing | |
2128 | * toward zero as the amount of memory grows, with a floor of 128 pages. | |
2129 | */ | |
2130 | nr_pages = totalram_pages - totalhigh_pages; | |
2131 | limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); | |
2132 | limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | |
2133 | limit = max(limit, 128UL); | |
2134 | sysctl_udp_mem[0] = limit / 4 * 3; | |
2135 | sysctl_udp_mem[1] = limit; | |
2136 | sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; | |
2137 | ||
2138 | sysctl_udp_rmem_min = SK_MEM_QUANTUM; | |
2139 | sysctl_udp_wmem_min = SK_MEM_QUANTUM; | |
2140 | } | |
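/*
 * Worked example of the sizing above (an assumption-laden sketch: 256 MB
 * of low memory and 4 KiB pages, i.e. PAGE_SHIFT == 12):
 *
 *	nr_pages = 65536
 *	limit    = min(65536, 1 << 16) >> 8  = 256
 *	limit    = (256 * (65536 >> 8)) >> 1 = 32768 pages  (128 MB)
 *
 *	sysctl_udp_mem = { 24576, 32768, 49152 }  (in pages)
 *
 * i.e. the pressure threshold sits at roughly half of a 256 MB machine,
 * matching the comment above.
 */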
2141 | ||
2142 | int udp4_ufo_send_check(struct sk_buff *skb) | |
2143 | { | |
2144 | const struct iphdr *iph; | |
2145 | struct udphdr *uh; | |
2146 | ||
2147 | if (!pskb_may_pull(skb, sizeof(*uh))) | |
2148 | return -EINVAL; | |
2149 | ||
2150 | iph = ip_hdr(skb); | |
2151 | uh = udp_hdr(skb); | |
2152 | ||
2153 | uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, | |
2154 | IPPROTO_UDP, 0); | |
2155 | skb->csum_start = skb_transport_header(skb) - skb->head; | |
2156 | skb->csum_offset = offsetof(struct udphdr, check); | |
2157 | skb->ip_summed = CHECKSUM_PARTIAL; | |
2158 | return 0; | |
2159 | } | |
2160 | ||
2161 | struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features) | |
2162 | { | |
2163 | struct sk_buff *segs = ERR_PTR(-EINVAL); | |
2164 | unsigned int mss; | |
2165 | int offset; | |
2166 | __wsum csum; | |
2167 | ||
2168 | mss = skb_shinfo(skb)->gso_size; | |
2169 | if (unlikely(skb->len <= mss)) | |
2170 | goto out; | |
2171 | ||
2172 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { | |
2173 | /* Packet is from an untrusted source, reset gso_segs. */ | |
2174 | int type = skb_shinfo(skb)->gso_type; | |
2175 | ||
2176 | if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || | |
2177 | !(type & (SKB_GSO_UDP)))) | |
2178 | goto out; | |
2179 | ||
2180 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); | |
2181 | ||
2182 | segs = NULL; | |
2183 | goto out; | |
2184 | } | |
2185 | ||
2186 | /* Do software UFO. Complete and fill in the UDP checksum, as hardware | |
2187 | * cannot checksum UDP packets that are sent as multiple IP fragments. | |
2188 | */ | |
2189 | offset = skb->csum_start - skb_headroom(skb); | |
2190 | csum = skb_checksum(skb, offset, skb->len - offset, 0); | |
2191 | offset += skb->csum_offset; | |
2192 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | |
2193 | skb->ip_summed = CHECKSUM_NONE; | |
2194 | ||
2195 | /* Fragment the skb. IP headers of the fragments are updated in | |
2196 | * inet_gso_segment(). | |
2197 | */ | |
2198 | segs = skb_segment(skb, features); | |
2199 | out: | |
2200 | return segs; | |
2201 | } | |
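/*
 * Note on the software checksum completion above (descriptive comment,
 * not from the original source): with CHECKSUM_PARTIAL, skb->csum_start
 * marks where checksumming begins (the UDP header, as set up by
 * udp4_ufo_send_check()) and skb->csum_offset is where the final
 * checksum is stored within it (offsetof(struct udphdr, check) == 6).
 * udp4_ufo_fragment() folds the payload checksum there itself because
 * hardware offload cannot be used once the packet is IP-fragmented.
 */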
2202 |