/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 * Authors:     Lotsa people, from code originally in tcp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Establish, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
        struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 * 1) Sockets bound to different interfaces may share a local port.
 *    Failing that, goto test 2.
 * 2) If all sockets have sk->sk_reuse set, and none of them are in
 *    TCP_LISTEN state, the port may be shared.
 *    Failing that, goto test 3.
 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *    address, and none of them are the same, the port may be
 *    shared.
 *    Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)  -DaveM
 */
#define FASTREUSEPORT_ANY       1
#define FASTREUSEPORT_STRICT    2

struct inet_bind_bucket {
        possible_net_t          ib_net;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
        kuid_t                  fastuid;
#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr         fast_v6_rcv_saddr;
#endif
        __be32                  fast_rcv_saddr;
        unsigned short          fast_sk_family;
        bool                    fast_ipv6_only;
        struct hlist_node       node;
        struct hlist_head       owners;
};
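
/* Illustrative sketch (not part of the original header): how the fastreuse
 * hint described in the comment above might be maintained when a socket
 * joins a bind bucket.  The real update lives in the bind/hash code; this
 * hypothetical helper only restates the quoted check:
 * (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN)).
 */
#if 0   /* example only, never compiled */
static void example_update_fastreuse(struct inet_bind_bucket *tb,
                                     struct sock *newsk)
{
        bool reuse_ok = newsk->sk_reuse && newsk->sk_state != TCP_LISTEN;

        if (hlist_empty(&tb->owners))
                tb->fastreuse = reuse_ok;       /* first owner decides */
        else if (tb->fastreuse && !reuse_ok)
                tb->fastreuse = 0;              /* one non-conforming socket clears the hint */
}
#endif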

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
        return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
        hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
        spinlock_t              lock;
        struct hlist_head       chain;
};

/*
 * Sockets can be hashed in the established or the listening table.
 */
struct inet_listen_hashbucket {
        spinlock_t              lock;
        struct hlist_head       head;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE       32      /* Yes, really, this is all you need. */

struct inet_hashinfo {
        /* This is for sockets with full identity only.  Sockets here will
         * always be without wildcards and will have the following invariant:
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
         */
        struct inet_ehash_bucket        *ehash;
        spinlock_t                      *ehash_locks;
        unsigned int                    ehash_mask;
        unsigned int                    ehash_locks_mask;

        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
         */
        struct inet_bind_hashbucket     *bhash;

        unsigned int                    bhash_size;
        /* 4-byte hole on 64-bit */

        struct kmem_cache               *bind_bucket_cachep;

        /* All the above members are written once at bootup and
         * never written again _or_ are predominantly read-access.
         *
         * Now align to a new cache line as all the following members
         * may often be dirty.
         */
        /* All sockets in TCP_LISTEN state will be in here.  This is the only
         * table where wildcard'd TCP sockets can exist.  The hash function
         * here is just the local port number.
         */
        struct inet_listen_hashbucket   listening_hash[INET_LHTABLE_SIZE]
                                        ____cacheline_aligned_in_smp;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
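
/* Illustrative sketch (not part of the original header): the typical pairing
 * of inet_ehash_bucket() and inet_ehash_lockp().  Both are indexed with the
 * same hash value; the lock array is smaller, so several buckets may share a
 * lock.  (Lookup fast paths walk the chain under RCU instead of taking the
 * lock.)  Function and variable names below are hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_walk_ehash_chain(struct inet_hashinfo *hashinfo, u32 hash)
{
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, hash);
        const struct hlist_nulls_node *node;
        struct sock *sk;

        spin_lock(lock);
        sk_nulls_for_each(sk, node, &head->chain) {
                /* inspect sk; every socket here has a full identity */
        }
        spin_unlock(lock);
}
#endif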

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
        kvfree(hashinfo->ehash_locks);
        hashinfo->ehash_locks = NULL;
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
                        struct inet_bind_hashbucket *head,
                        const unsigned short snum);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                              struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
                               const u32 bhash_size)
{
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
        return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
        return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    struct sk_buff *skb, int doff,
                                    const __be32 saddr, const __be16 sport,
                                    const __be32 daddr,
                                    const unsigned short hnum,
                                    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                struct sk_buff *skb, int doff,
                __be32 saddr, __be16 sport,
                __be32 daddr, __be16 dport, int dif, int sdif)
{
        return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
                                      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock: __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32-bit value
   and compare it with the 32-bit value read from &...->dport.  Let's at
   least make sure that it's not mixed with anything else...
   On 64-bit targets we combine comparisons with a pair of adjacent __be32
   fields in the same way.
 */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
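
/* Illustrative sketch (not part of the original header): how a lookup path
 * builds and uses the port cookie.  __sport is the peer's port as seen on
 * the wire (network byte order) and __dport is the local port number in host
 * byte order (hnum), matching how __inet_lookup_established() is called.
 * The helper and variable names below are hypothetical.
 */
#if 0   /* example only, never compiled */
static bool example_ports_match(const struct sock *sk,
                                __be16 wire_sport, u16 local_hnum)
{
        const __portpair ports = INET_COMBINED_PORTS(wire_sport, local_hnum);

        /* sk->sk_portpair overlays inet_dport + inet_num, so a single 32-bit
         * comparison checks both ports at once. */
        return sk->sk_portpair == ports;
}
#endif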

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
                                   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
                                   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
        (((__sk)->sk_portpair == (__ports)) &&                  \
         ((__sk)->sk_addrpair == (__cookie)) &&                 \
         (!(__sk)->sk_bound_dev_if ||                           \
          ((__sk)->sk_bound_dev_if == (__dif)) ||               \
          ((__sk)->sk_bound_dev_if == (__sdif))) &&             \
         net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
        (((__sk)->sk_portpair == (__ports)) &&                  \
         ((__sk)->sk_daddr == (__saddr)) &&                     \
         ((__sk)->sk_rcv_saddr == (__daddr)) &&                 \
         (!(__sk)->sk_bound_dev_if ||                           \
          ((__sk)->sk_bound_dev_if == (__dif)) ||               \
          ((__sk)->sk_bound_dev_if == (__sdif))) &&             \
         net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
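
/* Illustrative sketch (not part of the original header): how the established
 * lookup combines the address cookie, the port cookie and INET_MATCH() while
 * walking an ehash chain.  This mirrors the shape of
 * __inet_lookup_established() in net/ipv4/inet_hashtables.c; the names below
 * are hypothetical and details (refcounting, nulls restart) are omitted.
 */
#if 0   /* example only, never compiled */
static struct sock *example_ehash_match(struct net *net,
                                        struct inet_ehash_bucket *head,
                                        __be32 saddr, __be16 sport,
                                        __be32 daddr, u16 hnum,
                                        int dif, int sdif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        const struct hlist_nulls_node *node;
        struct sock *sk;

        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif, sdif))
                        return sk;
        }
        return NULL;
}
#endif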

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif, const int sdif);

static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
                                const __be32 saddr, const __be16 sport,
                                const __be32 daddr, const __be16 dport,
                                const int dif)
{
        return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
                                         ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
                                         struct inet_hashinfo *hashinfo,
                                         struct sk_buff *skb, int doff,
                                         const __be32 saddr, const __be16 sport,
                                         const __be32 daddr, const __be16 dport,
                                         const int dif, const int sdif,
                                         bool *refcounted)
{
        u16 hnum = ntohs(dport);
        struct sock *sk;

        sk = __inet_lookup_established(net, hashinfo, saddr, sport,
                                       daddr, hnum, dif, sdif);
        *refcounted = true;
        if (sk)
                return sk;
        *refcounted = false;
        return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
                                      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       struct sk_buff *skb, int doff,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const __be16 dport,
                                       const int dif)
{
        struct sock *sk;
        bool refcounted;

        sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
                           dport, dif, 0, &refcounted);

        if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
                sk = NULL;
        return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                             struct sk_buff *skb,
                                             int doff,
                                             const __be16 sport,
                                             const __be16 dport,
                                             const int sdif,
                                             bool *refcounted)
{
        struct sock *sk = skb_steal_sock(skb);
        const struct iphdr *iph = ip_hdr(skb);

        *refcounted = true;
        if (sk)
                return sk;

        return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
                             doff, iph->saddr, sport,
                             iph->daddr, dport, inet_iif(skb), sdif,
                             refcounted);
}
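
/* Illustrative sketch (not part of the original header): how a receive path
 * typically consumes the *refcounted result of __inet_lookup_skb().  When
 * refcounted comes back false, the socket was found without taking a
 * reference (listener path), so the caller must not sock_put() it.  This
 * mirrors the pattern used by tcp_v4_rcv(); the names below are
 * hypothetical.
 */
#if 0   /* example only, never compiled */
static void example_rcv(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
                        int doff, __be16 sport, __be16 dport, int sdif)
{
        bool refcounted;
        struct sock *sk;

        sk = __inet_lookup_skb(hashinfo, skb, doff, sport, dport, sdif,
                               &refcounted);
        if (!sk)
                return;         /* no socket: caller would respond/drop */

        /* ... deliver skb to sk ... */

        if (refcounted)
                sock_put(sk);   /* drop the reference taken by the lookup */
}
#endif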

u32 inet6_ehashfn(const struct net *net,
                  const struct in6_addr *laddr, const u16 lport,
                  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
        sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
        ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}
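
/* Illustrative sketch (not part of the original header): what the v4-mapped
 * form stored by sk_daddr_set()/sk_rcv_saddr_set() looks like when IPv6 is
 * enabled.  An IPv4 address a.b.c.d is kept as ::ffff:a.b.c.d so dual-stack
 * code can compare a single IPv6 field.  The helper name below is
 * hypothetical.
 */
#if 0   /* example only, never compiled */
#if IS_ENABLED(CONFIG_IPV6)
static bool example_daddr_is_mapped(const struct sock *sk, __be32 addr)
{
        const struct in6_addr *a = &sk->sk_v6_daddr;

        /* ::ffff:a.b.c.d layout: two zero 32-bit groups, then 0000:ffff,
         * then the original IPv4 address in the last 32 bits. */
        return a->s6_addr32[0] == 0 && a->s6_addr32[1] == 0 &&
               a->s6_addr32[2] == htonl(0x0000ffff) &&
               a->s6_addr32[3] == addr;
}
#endif
#endif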

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk, u32 port_offset,
                        int (*check_established)(struct inet_timewait_death_row *,
                                                 struct sock *, __u16,
                                                 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk);
#endif /* _INET_HASHTABLES_H */