/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

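/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * consulting the private cb area through UDP_SKB_CB() on the receive path.
 */
static inline bool udp_example_full_coverage(struct sk_buff *skb)
{
	/* full coverage unless a UDP-Lite sender negotiated less */
	return UDP_SKB_CB(skb)->cscov == skb->len;
}
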
/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));
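/*
 * The 2 * sizeof(long) alignment likely keeps sizeof(struct udp_hslot) a
 * power of two (8 bytes on 32-bit, 16 on 64-bit), so that indexing the
 * hash arrays below compiles to a shift rather than a multiply.
 */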

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() is performed by the caller before
 * udp_hashslot2() is invoked, which explains the difference from
 * udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
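
/*
 * Illustrative sketch (hypothetical helper, not part of this header): a
 * secondary-hash lookup for IPv4, assuming ipv4_portaddr_hash() from
 * <net/ip.h>, which already folds in net_hash_mix() as the comment above
 * requires.
 */
static inline struct udp_hslot *udp_example_hslot2(struct udp_table *table,
						   struct net *net,
						   __be32 saddr,
						   unsigned int port)
{
	return udp_hashslot2(table, ipv4_portaddr_hash(net, saddr, port));
}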

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
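
/*
 * For plain UDP the receive path initialises cscov to the full datagram
 * length, so the partial-coverage branch above is only taken by UDP-Lite
 * sockets that negotiated a smaller checksum coverage (RFC 3828).
 */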

/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
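
/*
 * Illustrative sketch (hypothetical helper): folding the 32-bit partial sum
 * from udp_csum() into the final 16-bit checksum with udp_v4_check().  UDP
 * transmits an all-zero result as 0xffff (CSUM_MANGLED_0), since zero on
 * the wire means "no checksum".
 */
static inline __sum16 udp_example_v4_csum(struct sk_buff *skb,
					  __be32 saddr, __be32 daddr)
{
	__sum16 check = udp_v4_check(skb->len, saddr, daddr, udp_csum(skb));

	return check ?: CSUM_MANGLED_0;
}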

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility of leaking useful information to an
	 * attacker.  Only the upper 16 bits are relevant in the computation
	 * of a 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
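
/*
 * Worked example for the scaling above: with the common ephemeral range
 * min = 32768, max = 61000 and an obfuscated hash of 0x80000000,
 * ((0x80000000ULL * 28232) >> 32) + 32768 = 14116 + 32768 = 46884,
 * i.e. the hash is mapped uniformly onto [min, max).
 */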

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}
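
/*
 * Illustrative sketch of how the enqueue path can fill the scratch area;
 * the real helper lives in net/ipv4/udp.c, and this hypothetical variant
 * only shows the intended use of the fields above.
 */
static inline void udp_example_set_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
}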

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}

/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); \
} while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); \
} while(0)

#define __UDP6_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
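
/*
 * Illustrative usage (hypothetical call site): bumping the per-netns
 * counters for a received datagram from softirq context, where the
 * non-preemption-safe __UDP_INC_STATS() variant is appropriate:
 *
 *	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
 *			IS_UDPLITE(sk));
 */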

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
	struct udp_table		*udp_table;
};

struct udp_iter_state {
	struct seq_net_private  p;
	int			bucket;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	struct sk_buff *segs;

	/* the GSO CB lies after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
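
/*
 * Illustrative sketch (hypothetical helper): how a receive path might
 * consume the list returned by udp_rcv_segment(), unlinking each segment
 * before handing it on.  The per-segment processing is left as a stub.
 */
static inline void udp_example_rcv_list(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs = udp_rcv_segment(sk, skb, true);

	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);
		/* hand the segment to the normal UDP receive path here */
	}
}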

#endif	/* _UDP_H */