/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/config.h>

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
        rwlock_t          lock;
        struct hlist_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
        unsigned short          port;
        signed short            fastreuse;
        struct hlist_node       node;
        struct hlist_head       owners;
};

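/*
 * Illustrative sketch, not part of the original header: the fastreuse
 * book-keeping described above boils down to the predicate below.  The
 * helper name is hypothetical; the real flag maintenance happens in the
 * bind/get-port path when a socket is added to a bucket's owners list.
 */
static inline int inet_bind_bucket_would_fastreuse(const struct sock *newsk)
{
        /* The bucket's fastreuse flag stays set only while every owner
         * allows address reuse and none of them is listening.
         */
        return newsk->sk_reuse && newsk->sk_state != TCP_LISTEN;
}
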
#define inet_bind_bucket_for_each(tb, node, head) \
        hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
        spinlock_t        lock;
        struct hlist_head chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
        /* This is for sockets with full identity only.  Sockets here will
         * always be without wildcards and will have the following invariant:
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
         * First half of the table is for sockets not in TIME_WAIT, second half
         * is for TIME_WAIT sockets only.
         */
        struct inet_ehash_bucket        *ehash;

        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
         */
        struct inet_bind_hashbucket     *bhash;

        int                             bhash_size;
        unsigned int                    ehash_size;

        /* All sockets in TCP_LISTEN state will be in here.  This is the only
         * table where wildcard'd TCP sockets can exist.  Hash function here
         * is just local port number.
         */
        struct hlist_head               listening_hash[INET_LHTABLE_SIZE];

        /* All the above members are written once at bootup and
         * never written again _or_ are predominantly read-access.
         *
         * Now align to a new cache line as all the following members
         * are often dirty.
         */
        rwlock_t                        lhash_lock ____cacheline_aligned;
        atomic_t                        lhash_users;
        wait_queue_head_t               lhash_wait;
        kmem_cache_t                    *bind_bucket_cachep;
};

static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport,
                                        const __u32 faddr, const __u16 fport)
{
        unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);
        h ^= h >> 16;
        h ^= h >> 8;
        return h;
}

static inline int inet_sk_ehashfn(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        const __u32 laddr = inet->rcv_saddr;
        const __u16 lport = inet->num;
        const __u32 faddr = inet->daddr;
        const __u16 fport = inet->dport;

        return inet_ehashfn(laddr, lport, faddr, fport);
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

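/*
 * Note on inet_ehash_bucket(): masking with (ehash_size - 1) assumes
 * ehash_size is a power of two.  As the comments in struct inet_hashinfo
 * explain, the table really holds two halves; the TIME_WAIT twin of a
 * chain found here lives at (bucket + ehash_size).
 */
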
extern struct inet_bind_bucket *
                    inet_bind_bucket_create(kmem_cache_t *cachep,
                                            struct inet_bind_hashbucket *head,
                                            const unsigned short snum);
extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
                                     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
        return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                           const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
        return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
        return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
                                       struct sock *sk, struct sock *child)
{
        const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        sk_add_bind_node(child, &tb->owners);
        inet_csk(child)->icsk_bind_hash = tb;
        spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
                                     struct sock *sk, struct sock *child)
{
        local_bh_disable();
        __inet_inherit_port(table, sk, child);
        local_bh_enable();
}

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
        /* read_lock synchronizes us with the writers */
        read_lock(&hashinfo->lhash_lock);
        atomic_inc(&hashinfo->lhash_users);
        read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
        if (atomic_dec_and_test(&hashinfo->lhash_users))
                wake_up(&hashinfo->lhash_wait);
}

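/*
 * Illustrative sketch, not part of the original header: a sleepable walk
 * over every listening socket would bracket the iteration with
 * inet_listen_lock()/inet_listen_unlock(); inet_listen_wlock() callers
 * wait until lhash_users drops back to zero.  The helper name and the
 * callback are hypothetical.
 */
static inline void inet_listen_walk_example(struct inet_hashinfo *hashinfo,
                                            void (*cb)(struct sock *sk))
{
        int i;

        inet_listen_lock(hashinfo);
        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                struct sock *sk;
                struct hlist_node *node;

                /* Each chain hashes listeners by local port only. */
                sk_for_each(sk, node, &hashinfo->listening_hash[i])
                        cb(sk);
        }
        inet_listen_unlock(hashinfo);
}
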
static inline void __inet_hash(struct inet_hashinfo *hashinfo,
                               struct sock *sk, const int listen_possible)
{
        struct hlist_head *list;
        rwlock_t *lock;

        BUG_TRAP(sk_unhashed(sk));
        if (listen_possible && sk->sk_state == TCP_LISTEN) {
                list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
                lock = &hashinfo->lhash_lock;
                inet_listen_wlock(hashinfo);
        } else {
                struct inet_ehash_bucket *head;
                sk->sk_hash = inet_sk_ehashfn(sk);
                head = inet_ehash_bucket(hashinfo, sk->sk_hash);
                list = &head->chain;
                lock = &head->lock;
                write_lock(lock);
        }
        __sk_add_node(sk, list);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(lock);
        if (listen_possible && sk->sk_state == TCP_LISTEN)
                wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
                __inet_hash(hashinfo, sk, 1);
                local_bh_enable();
        }
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        rwlock_t *lock;

        if (sk_unhashed(sk))
                goto out;

        if (sk->sk_state == TCP_LISTEN) {
                local_bh_disable();
                inet_listen_wlock(hashinfo);
                lock = &hashinfo->lhash_lock;
        } else {
                lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
                write_lock_bh(lock);
        }

        if (__sk_del_node_init(sk))
                sock_prot_dec_use(sk->sk_prot);
        write_unlock_bh(lock);
out:
        if (sk->sk_state == TCP_LISTEN)
                wake_up(&hashinfo->lhash_wait);
}

static inline int inet_iif(const struct sk_buff *skb)
{
        return ((struct rtable *)skb->dst)->rt_iif;
}

extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
                                           const u32 daddr,
                                           const unsigned short hnum,
                                           const int dif);

/* Optimize the common listener case. */
static inline struct sock *
        inet_lookup_listener(struct inet_hashinfo *hashinfo,
                             const u32 daddr,
                             const unsigned short hnum, const int dif)
{
        struct sock *sk = NULL;
        const struct hlist_head *head;

        read_lock(&hashinfo->lhash_lock);
        head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
        if (!hlist_empty(head)) {
                const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

                if (inet->num == hnum && !sk->sk_node.next &&
                    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
                    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
                    !sk->sk_bound_dev_if)
                        goto sherry_cache;
                sk = __inet_lookup_listener(head, daddr, hnum, dif);
        }
        if (sk) {
sherry_cache:
                sock_hold(sk);
        }
        read_unlock(&hashinfo->lhash_lock);
        return sk;
}

/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
        (((__u32)(__sport) << 16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
        (((__u32)(__dport) << 16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_hash == (__hash)) && \
         ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
         ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_hash == (__hash)) && \
         ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
         ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_hash == (__hash)) && \
         (inet_sk(__sk)->daddr == (__saddr)) && \
         (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
         ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_hash == (__hash)) && \
         (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
         (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
         ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
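
/*
 * INET_MATCH()/INET_TW_MATCH() compare {dport, num} with a single 32-bit
 * load and, on 64-bit architectures, {daddr, rcv_saddr} with a single
 * 64-bit load; this relies on those fields sitting back to back in
 * struct inet_sock (and their tw_* counterparts in struct
 * inet_timewait_sock).  INET_ADDR_COOKIE() builds the 64-bit key to
 * compare against in the matching byte order.
 */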

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
        __inet_lookup_established(struct inet_hashinfo *hashinfo,
                                  const u32 saddr, const u16 sport,
                                  const u32 daddr, const u16 hnum,
                                  const int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

        prefetch(head->chain.first);
        read_lock(&head->lock);
        sk_for_each(sk, node, &head->chain) {
                if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }

        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
                if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
                        goto hit;
        }
        sk = NULL;
out:
        read_unlock(&head->lock);
        return sk;
hit:
        sock_hold(sk);
        goto out;
}

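/*
 * Both loops above run under the same head->lock: a socket is moved onto
 * the TIME_WAIT chain at (head + ehash_size) while the lock of its
 * original first-half bucket is held, so a single read_lock() sees the
 * established -> TIME_WAIT handover consistently.
 */
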
static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
                                         const u32 saddr, const u16 sport,
                                         const u32 daddr, const u16 hnum,
                                         const int dif)
{
        struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
                                                    hnum, dif);
        return sk ? : inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
                                       const u32 saddr, const u16 sport,
                                       const u32 daddr, const u16 dport,
                                       const int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __inet_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
        local_bh_enable();

        return sk;
}
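
/*
 * Illustrative sketch, not part of the original header: a receive path
 * running in softirq context (local BH already disabled) would demux an
 * incoming segment roughly as below, passing the source port in network
 * byte order and converting only the local port to host order.  The
 * function name is hypothetical, struct iphdr/tcphdr assume
 * <linux/ip.h>/<linux/tcp.h>, and TCP would pass &tcp_hashinfo as hashinfo.
 */
static inline struct sock *inet_lookup_skb_example(struct inet_hashinfo *hashinfo,
                                                   const struct sk_buff *skb,
                                                   const struct iphdr *iph,
                                                   const struct tcphdr *th)
{
        /* inet_iif() recovers the incoming interface from the route. */
        return __inet_lookup(hashinfo, iph->saddr, th->source,
                             iph->daddr, ntohs(th->dest), inet_iif(skb));
}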

extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
                             struct sock *sk);
#endif /* _INET_HASHTABLES_H */