/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application. In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2. This is what an FTP server does
 * all day. To optimize this case we use a specific flag bit defined
 * below. As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit; if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all:
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it. In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server. Needless
 * to say, this does not scale at all. With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time? I thought so. ;-)	-DaveM
 *
 * A sketch of this fast path follows the inet_bind_bucket definition below.
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};
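
/* Illustrative sketch, not part of the kernel API: the fast path described
 * in the big comment above. The helper name here is made up; the real
 * bind-conflict logic lives in net/ipv4/inet_connection_sock.c.
 */
static inline bool inet_bind_fastreuse_sketch(const struct inet_bind_bucket *tb,
					      const struct sock *newsk)
{
	/* tb->fastreuse stays set only while every socket added to the
	 * bucket passed (sk_reuse && sk_state != TCP_LISTEN), so a reusing,
	 * non-listening newcomer may bind without walking tb->owners.
	 */
	return tb->fastreuse > 0 && newsk->sk_reuse &&
	       newsk->sk_state != TCP_LISTEN;
}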

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain values for all hash buckets:
 * a socket might transition from ESTABLISHED to LISTEN state without
 * an RCU grace period. A lookup in the ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
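
/* Illustrative sketch, not part of the kernel API: why each chain gets its
 * own nulls value. An RCU walker holds no lock, so a concurrent unhash and
 * rehash can move it onto another chain; when it reaches the end marker it
 * verifies the nulls value still names the slot it started in, and restarts
 * otherwise. The function name is made up; the real loop is in
 * __inet_lookup_established().
 */
static inline struct sock *inet_ehash_walk_sketch(struct inet_ehash_bucket *head,
						  unsigned int slot)
{
	struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		/* ... compare sk against the search keys here ... */
	}
	/* An end marker from a foreign chain means we were moved; retry. */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}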
struct inet_listen_hashbucket {
	spinlock_t		lock;
	unsigned int		count;
	union {
		struct hlist_head	head;
		struct hlist_nulls_head	nulls_head;
	};
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only. Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *	TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	struct inet_bind_hashbucket	*bhash;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might often be dirtied.
	 */
	/* All sockets in TCP_LISTEN state will be in listening_hash.
	 * This is the only table where wildcard'd TCP sockets can
	 * exist. listening_hash is only hashed by local port number.
	 * If lhash2 is initialized, the same socket will also be hashed
	 * to lhash2 by port and address.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};

#define inet_lhash2_for_each_icsk_continue(__icsk) \
	hlist_for_each_entry_continue(__icsk, icsk_listen_portaddr_node)

#define inet_lhash2_for_each_icsk(__icsk, list) \
	hlist_for_each_entry(__icsk, list, icsk_listen_portaddr_node)

#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
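
/* Illustrative sketch, not part of the kernel API: the ehash locks are
 * striped, so a writer takes the lock guarding its bucket before touching
 * the chain. The function name is made up; real insertions go through
 * inet_ehash_insert() and friends.
 */
static inline void inet_ehash_locked_update_sketch(struct inet_hashinfo *hashinfo,
						   unsigned int hash)
{
	spinlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	spin_lock(lock);
	/* ... modify inet_ehash_bucket(hashinfo, hash)->chain here ... */
	spin_unlock(lock);
}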

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
	kfree(h->lhash2);
	h->lhash2 = NULL;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
			 bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num. We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport. Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
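
/* Illustrative sketch, not part of the kernel API: INET_COMBINED_PORTS
 * packs the remote port (network order) and the local port (host order)
 * into one 32-bit value laid out like the adjacent inet_dport/inet_num
 * fields, so both ports are compared with a single load. The function
 * name is made up.
 */
static inline bool inet_ports_match_sketch(const struct sock *sk,
					   __be16 sport, unsigned short hnum)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);

	return sk->sk_portpair == ports;
}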

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))	&&	\
	 ((__sk)->sk_addrpair == (__cookie))	&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))	||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))	&&	\
	 ((__sk)->sk_daddr == (__saddr))	&&	\
	 ((__sk)->sk_rcv_saddr == (__daddr))	&&	\
	 (((__sk)->sk_bound_dev_if == (__dif))	||	\
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&&	\
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
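
/* Illustrative sketch, not part of the kernel API: how a lookup loop pairs
 * INET_ADDR_COOKIE with INET_MATCH. The cookie is built once (one 64-bit
 * compare on 64-bit hosts, an unused placeholder on 32-bit ones), then
 * each candidate socket is tested with a single macro call. The function
 * name is made up; the real loop is in __inet_lookup_established().
 */
static inline bool inet_match_sketch(struct net *net, const struct sock *sk,
				     __be32 saddr, __be32 daddr,
				     __portpair ports, int dif, int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);

	return INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif, sdif);
}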

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
			const __be32 saddr, const __be16 sport,
			const __be32 daddr, const __be16 dport,
			const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb, refcounted);
	const struct iphdr *iph = ip_hdr(skb);

	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u32 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
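
/* Illustrative sketch, not part of the kernel API: the shape of a
 * check_established callback for __inet_hash_connect(). It decides whether
 * the four-tuple with local port lport is usable, optionally handing back a
 * recyclable TIME_WAIT socket through twp. This stub and its name are made
 * up; the real TCP callback is __inet_check_established() in
 * net/ipv4/inet_hashtables.c.
 */
static inline int inet_check_established_sketch(struct inet_timewait_death_row *death_row,
						struct sock *sk, __u16 lport,
						struct inet_timewait_sock **twp)
{
	/* Return 0 when the four-tuple with local port lport is free to
	 * use, or a negative errno such as -EADDRNOTAVAIL when it is not.
	 */
	return 0;
}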
#endif /* _INET_HASHTABLES_H */