/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16	cscov;
	__u8	partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
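
/* Usage sketch (illustrative, not part of the upstream header): reading
 * the UDP-Lite coverage fields through the UDP_SKB_CB() accessor. The
 * helper name is made up; it assumes @skb has already gone through the
 * UDP input path, which fills in the cb area.
 *
 *	static bool udp_example_full_coverage(struct sk_buff *skb)
 *	{
 *		struct udp_skb_cb *cb = UDP_SKB_CB(skb);
 *
 *		return !cb->partial_cov || cb->cscov == skb->len;
 *	}
 */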

/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(); this explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
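
/* Lookup sketch (illustrative only, assuming a valid @net and a local
 * port @num): take the primary slot for the port and walk its socket
 * list under the slot lock. The real lookup helpers in net/ipv4/udp.c
 * use RCU on the fast path rather than this spinlock.
 *
 *	struct udp_hslot *hslot = udp_hashslot(&udp_table, net, num);
 *	struct sock *sk;
 *
 *	spin_lock_bh(&hslot->lock);
 *	sk_for_each(sk, &hslot->head) {
 *		... inspect sk; hslot->count bounds the walk ...
 *	}
 *	spin_unlock_bh(&hslot->lock);
 */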

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
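
/* Receive-path sketch (illustrative): validate the datagram checksum
 * before delivery and drop on failure. udp_lib_checksum_complete()
 * short-circuits when a lower layer already verified the csum
 * (skb_csum_unnecessary()), and for UDP-Lite it only checks the
 * covered prefix.
 *
 *	if (udp_lib_checksum_complete(skb)) {
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */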

/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
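
/* Transmit-path sketch (illustrative): fold the sum over the UDP header
 * and all fragments into the final 16-bit checksum, adding the IPv4
 * pseudo-header. Assumes @uh points at a UDP header whose check field
 * was zeroed before summing, and that @saddr/@daddr come from the flow.
 * A result of 0 is replaced with CSUM_MANGLED_0, since 0 means "no
 * checksum" on the wire.
 *
 *	struct udphdr *uh = udp_hdr(skb);
 *	__wsum csum = udp_csum(skb);
 *
 *	uh->check = udp_v4_check(skb->len, saddr, daddr, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */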

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport,
				     __be16 dport);

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();	/* never called: UDP sockets are hashed in udp_lib_get_port() */
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use the default local port range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash; the caller has indicated
			 * an Ethernet packet, so hash its MAC addresses.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet; fall
			 * back to a consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit
	 * to minimize the possibility of leaking useful information to an
	 * attacker. Only the upper 16 bits are relevant in the computation
	 * of the 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
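
/* Worked example (illustrative): with min = 32768, max = 61000 and an
 * obfuscated hash of 0x80000000, ((u64)hash * (max - min)) >> 32 scales
 * the hash into [0, max - min): 0x80000000 * 28232 >> 32 = 14116, so the
 * chosen source port is 32768 + 14116 = 46884 (before htons()).
 */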

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}
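
/* In-kernel receive sketch (illustrative, error handling elided): pull
 * one queued datagram without blocking (@noblock = 1) and release it via
 * skb_consume_udp() once the payload has been copied out. The length
 * argument shown is an assumption; see udp_recvmsg() for the exact
 * accounting the real receive path performs.
 *
 *	int err;
 *	struct sk_buff *skb = skb_recv_udp(sk, 0, 1, &err);
 *
 *	if (skb) {
 *		... copy udp_skb_len(skb) bytes of payload ...
 *		skb_consume_udp(sk, skb, udp_skb_len(skb));
 *	}
 */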

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport);
int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the UDP header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}

/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
#define __UDP_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)

#define __UDP6_INC_STATS(net, field, is_udplite) do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite) do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4) \
({ \
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
				 sock_net(sk)->mib.udp_statistics) : \
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
				 sock_net(sk)->mib.udp_stats_in6); \
})
#else
#define __UDPX_MIB(sk, ipv4) \
({ \
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
			 sock_net(sk)->mib.udp_statistics; \
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
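
/* Usage sketch (illustrative): __UDPX_MIB() selects the v4/v6 and
 * UDP/UDP-Lite MIB table from the socket, so a family-agnostic counter
 * bump is a single line, e.g. on successful delivery:
 *
 *	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 */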

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t		family;
	struct udp_table	*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
	struct udp_seq_afinfo	*bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset it in this
	 * specific case, where PARTIAL is both correct and required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, so there is no need to save and
	 * restore any CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
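
/* Caller-side sketch (illustrative): consume the segment list returned
 * by udp_rcv_segment(), fixing up each single-datagram skb and handing
 * it to the per-packet receive function, as the v4/v6 input paths do.
 *
 *	struct sk_buff *next, *segs = udp_rcv_segment(sk, skb, true);
 *
 *	skb_list_walk_safe(segs, skb, next) {
 *		udp_post_segment_fix_csum(skb);
 *		... hand skb to the one-skb receive handler ...
 *	}
 */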

static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
{
	/* UDP-lite can't land here - no GRO */
	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);

	/* UDP packets generated with UDP_SEGMENT and traversing:
	 *
	 * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx)
	 *
	 * can reach a UDP socket with CHECKSUM_NONE, because
	 * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE.
	 * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will
	 * have a valid checksum, as the GRO engine validates the UDP csum
	 * before the aggregation and nobody strips such info in between.
	 * Instead of adding another check in the tunnel fastpath, we can force
	 * a valid csum after the segmentation.
	 * Additionally, fix up the UDP CB.
	 */
	UDP_SKB_CB(skb)->cscov = skb->len;
	if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid)
		skb->csum_valid = 1;
}

#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif

#endif	/* _UDP_H */