/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))

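/* Illustrative sketch (not part of the original header): UDP-Lite's receive
 * path fills in the coverage fields roughly like this, modeled on the
 * udplite_checksum_init() logic in <net/udplite.h>; treat the details as an
 * assumption:
 *
 *	UDP_SKB_CB(skb)->cscov = cscov;		// bytes covered by the csum
 *	UDP_SKB_CB(skb)->partial_cov = 1;	// set when cscov < skb->len
 */
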
/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() is performed before calling
 * udp_hashslot2(), which explains the difference from udp_hashslot().
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}

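/* Usage sketch (illustrative, not part of the original header): the v4
 * lookup path derives the secondary hash with ipv4_portaddr_hash() from
 * <net/ip.h>, which already folds in net_hash_mix(), e.g.:
 *
 *	u32 hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 *	struct udp_hslot *hslot2 = udp_hashslot2(&udp_table, hash2);
 */
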
extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}

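/* Usage sketch (illustrative): the receive paths in net/ipv4/udp.c call this
 * just before delivering a packet to the socket, along the lines of:
 *
 *	if (udp_lib_checksum_complete(skb))
 *		goto csum_error;
 */
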
/**
 *	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 *	@sk:	socket we are writing to
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

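/* Illustrative sketch (an assumption, modeled on the udp_set_csum() software
 * fallback in net/ipv4/udp.c): filling in a checksum with udp_v4_check()
 * looks roughly like this; a computed value of zero must be folded to
 * CSUM_MANGLED_0, since zero means "no checksum" on the wire:
 *
 *	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */
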
static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash, caller has indicated an
			 * Ethernet packet so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet, set
			 * to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit
	 * to minimize the possibility that any useful information is
	 * leaked to an attacker. Only the upper 16 bits are relevant in
	 * the computation of the 16 bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}

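/* Usage sketch (illustrative): UDP tunnel drivers such as vxlan use this to
 * pick a per-flow entropy source port; the port-range variables here are
 * assumptions:
 *
 *	__be16 src_port = udp_flow_src_port(dev_net(dev), skb,
 *					    port_min, port_max, true);
 */
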
static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored on 16 bits since the udp header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}

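/* Usage sketch (illustrative, modeled on the recvmsg path in net/ipv4/udp.c):
 * linear skbs take the fast copy above, everything else falls back to the
 * generic datagram copy:
 *
 *	if (udp_skb_is_linear(skb))
 *		err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 *	else
 *		err = skb_copy_datagram_msg(skb, off, msg, copied);
 */
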
/*
 *	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); \
} while(0)
#define __UDP_INC_STATS(net, field, is_udplite)		do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field); \
} while(0)

#define __UDP6_INC_STATS(net, field, is_udplite)	do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#define UDP6_INC_STATS(net, field, __lite)		do { \
	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
	       (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t		family;
	struct udp_table	*udp_table;
};

struct udp_iter_state {
	struct seq_net_private	p;
	int			bucket;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment, as does UDP GSO (verified by
	 * udp_send_skb). But when those packets are looped in
	 * dev_loopback_xmit their ip_summed is set to CHECKSUM_UNNECESSARY.
	 * Reset it in this specific case, where PARTIAL is both correct and
	 * required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, no need to save and restore any
	 * CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}

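/* Usage sketch (illustrative, modeled on udp_queue_rcv_skb() in
 * net/ipv4/udp.c): callers walk the returned segment list and deliver each
 * segment individually; udp_queue_rcv_one_skb() here stands in for the
 * caller's per-packet delivery routine:
 *
 *	struct sk_buff *next, *segs = udp_rcv_segment(sk, skb, true);
 *
 *	skb_list_walk_safe(segs, skb, next) {
 *		__skb_pull(skb, skb_transport_offset(skb));
 *		udp_queue_rcv_one_skb(sk, skb);
 *	}
 */
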
#ifdef CONFIG_BPF_STREAM_PARSER
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
#endif /* BPF_STREAM_PARSER */

#endif	/* _UDP_H */