/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct net *net,
                                                 struct inet_bind_hashbucket *head,
                                                 const unsigned short snum)
{
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

        if (tb != NULL) {
                tb->ib_net    = net;
                tb->port      = snum;
                tb->fastreuse = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
        return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
                kmem_cache_free(cachep, tb);
        }
}
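
/*
 * Illustrative sketch, not part of this file: the pattern a bind-time caller
 * (e.g. inet_csk_get_port()) follows with the two helpers above -- hash the
 * port, take the bucket spinlock with BHs disabled, search the chain for an
 * existing bucket for this (net, port) pair and create one only if none is
 * found.  The wrapper name example_find_or_create_bucket() is invented for
 * the sketch.
 */
#if 0
static struct inet_bind_bucket *
example_find_or_create_bucket(struct inet_hashinfo *hinfo, struct net *net,
                              const unsigned short snum)
{
        struct inet_bind_hashbucket *head =
                &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
        struct inet_bind_bucket *tb;
        struct hlist_node *node;

        local_bh_disable();
        spin_lock(&head->lock);
        inet_bind_bucket_for_each(tb, node, &head->chain)
                if (tb->ib_net == net && tb->port == snum)
                        goto found;
        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, head, snum);
found:
        /* Caller would inet_bind_hash() the socket here, then unlock:
         *      spin_unlock(&head->lock); local_bh_enable();
         */
        return tb;      /* NULL if kmem_cache_alloc() failed */
}
#endif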
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
{
        inet_sk(sk)->num = snum;
        sk_add_bind_node(sk, &tb->owners);
        inet_csk(sk)->icsk_bind_hash = tb;
}
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
        const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
        local_bh_disable();
        __inet_put_port(sk);
        local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);
/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
        __acquires(hashinfo->lhash_lock)
{
        write_lock(&hashinfo->lhash_lock);

        if (atomic_read(&hashinfo->lhash_users)) {
                DEFINE_WAIT(wait);

                for (;;) {
                        prepare_to_wait_exclusive(&hashinfo->lhash_wait,
                                                  &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&hashinfo->lhash_users))
                                break;
                        write_unlock_bh(&hashinfo->lhash_lock);
                        schedule();
                        write_lock_bh(&hashinfo->lhash_lock);
                }

                finish_wait(&hashinfo->lhash_wait, &wait);
        }
}

EXPORT_SYMBOL(inet_listen_wlock);
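
/*
 * Illustrative sketch, not part of this file: the two sides that
 * inet_listen_wlock() arbitrates between.  Writers take lhash_lock through
 * the function above and wake lhash_wait after dropping the lock (exactly
 * what __inet_hash() and inet_unhash() below do); plain readers just use the
 * rwlock directly, as __inet_lookup_listener() does.  The function name
 * example_listening_hash_write() is invented for the sketch.
 */
#if 0
static void example_listening_hash_write(struct inet_hashinfo *hashinfo,
                                         struct sock *sk,
                                         struct hlist_head *list)
{
        local_bh_disable();
        inet_listen_wlock(hashinfo);    /* waits for lhash_users to drain */
        __sk_add_node(sk, list);
        write_unlock(&hashinfo->lhash_lock);
        local_bh_enable();
        wake_up(&hashinfo->lhash_wait); /* wake the next exclusive waiter */
}
#endif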
/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
static struct sock *inet_lookup_listener_slow(struct net *net,
                                              const struct hlist_head *head,
                                              const __be32 daddr,
                                              const unsigned short hnum,
                                              const int dif)
{
        struct sock *result = NULL, *sk;
        const struct hlist_node *node;
        int hiscore = -1;

        sk_for_each(sk, node, head) {
                const struct inet_sock *inet = inet_sk(sk);

                if (sk->sk_net == net && inet->num == hnum &&
                    !ipv6_only_sock(sk)) {
                        const __be32 rcv_saddr = inet->rcv_saddr;
                        int score = sk->sk_family == PF_INET ? 1 : 0;

                        if (rcv_saddr) {
                                if (rcv_saddr != daddr)
                                        continue;
                                score += 2;
                        }
                        if (sk->sk_bound_dev_if) {
                                if (sk->sk_bound_dev_if != dif)
                                        continue;
                                score += 2;
                        }
                        if (score == 5)
                                return sk;
                        if (score > hiscore) {
                                hiscore = score;
                                result  = sk;
                        }
                }
        }
        return result;
}
/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    const __be32 daddr, const unsigned short hnum,
                                    const int dif)
{
        struct sock *sk = NULL;
        const struct hlist_head *head;

        read_lock(&hashinfo->lhash_lock);
        head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
        if (!hlist_empty(head)) {
                const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

                if (inet->num == hnum && !sk->sk_node.next &&
                    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
                    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
                    !sk->sk_bound_dev_if && sk->sk_net == net)
                        goto sherry_cache;
                sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
        }
        if (sk) {
sherry_cache:
                sock_hold(sk);
        }
        read_unlock(&hashinfo->lhash_lock);
        return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
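
/*
 * Illustrative sketch, not part of this file: a receive-path caller in the
 * style of tcp_v4_rcv() falls back to the listener lookup when no established
 * socket matched, and must balance the sock_hold() taken above with
 * sock_put() once it is done with the socket.  example_deliver_to_listener()
 * is an invented name and the delivery step is elided.
 */
#if 0
static int example_deliver_to_listener(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       __be32 daddr, __be16 dport, int dif)
{
        struct sock *sk = __inet_lookup_listener(net, hashinfo, daddr,
                                                 ntohs(dport), dif);
        if (!sk)
                return -ENOENT; /* no listener bound to this address/port */

        /* ... queue the packet to sk ... */

        sock_put(sk);           /* drop the reference taken by the lookup */
        return 0;
}
#endif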
struct sock * __inet_lookup_established(struct net *net,
                                  struct inet_hashinfo *hashinfo,
                                  const __be32 saddr, const __be16 sport,
                                  const __be32 daddr, const u16 hnum,
                                  const int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

        prefetch(head->chain.first);
        read_lock(lock);
        sk_for_each(sk, node, &head->chain) {
                if (INET_MATCH(sk, net, hash, acookie,
                               saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }

        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &head->twchain) {
                if (INET_TW_MATCH(sk, net, hash, acookie,
                                  saddr, daddr, ports, dif))
                        goto hit;
        }
        sk = NULL;
out:
        read_unlock(lock);
        return sk;
hit:
        sock_hold(sk);
        goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
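
/*
 * Illustrative sketch, not part of this file: full demultiplexing tries the
 * established/TIME-WAIT hash first and only then the listening hash, roughly
 * the way the __inet_lookup() helper in <net/inet_hashtables.h> combines the
 * two exported lookups (the header's exact form may differ; this is only an
 * outline, and example_lookup() is an invented name).
 */
#if 0
static struct sock *example_lookup(struct net *net,
                                   struct inet_hashinfo *hashinfo,
                                   __be32 saddr, __be16 sport,
                                   __be32 daddr, __be16 dport, int dif)
{
        const u16 hnum = ntohs(dport);
        struct sock *sk = __inet_lookup_established(net, hashinfo, saddr,
                                                    sport, daddr, hnum, dif);

        /* Only listening sockets can match with wildcards, so they go last. */
        return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}
#endif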
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
                                    struct sock *sk, __u16 lport,
                                    struct inet_timewait_sock **twp)
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
        __be32 daddr = inet->rcv_saddr;
        __be32 saddr = inet->daddr;
        int dif = sk->sk_bound_dev_if;
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
        unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_node *node;
        struct inet_timewait_sock *tw;
        struct net *net = sk->sk_net;

        prefetch(head->chain.first);
        write_lock(lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &head->twchain) {
                tw = inet_twsk(sk2);

                if (INET_TW_MATCH(sk2, net, hash, acookie,
                                  saddr, daddr, ports, dif)) {
                        if (twsk_unique(sk, sk2, twp))
                                goto unique;
                        else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if (INET_MATCH(sk2, net, hash, acookie,
                               saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        /* Must record num and sport now. Otherwise we will see
         * in hash table socket with a funny identity. */
        inet->num = lport;
        inet->sport = htons(lport);
        sk->sk_hash = hash;
        BUG_TRAP(sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);

        if (twp) {
                *twp = tw;
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

                inet_twsk_put(tw);
        }

        return 0;

not_unique:
        write_unlock(lock);
        return -EADDRNOTAVAIL;
}
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
                                          inet->dport);
}
void __inet_hash_nolisten(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
        struct hlist_head *list;
        rwlock_t *lock;
        struct inet_ehash_bucket *head;

        BUG_TRAP(sk_unhashed(sk));

        sk->sk_hash = inet_sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
        list = &head->chain;
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        write_lock(lock);
        __sk_add_node(sk, list);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
static void __inet_hash(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;
        struct hlist_head *list;
        rwlock_t *lock;

        if (sk->sk_state != TCP_LISTEN) {
                __inet_hash_nolisten(sk);
                return;
        }

        BUG_TRAP(sk_unhashed(sk));
        list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
        lock = &hashinfo->lhash_lock;

        inet_listen_wlock(hashinfo);
        __sk_add_node(sk, list);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);
        wake_up(&hashinfo->lhash_wait);
}

void inet_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
                __inet_hash(sk);
                local_bh_enable();
        }
}
EXPORT_SYMBOL_GPL(inet_hash);
void inet_unhash(struct sock *sk)
{
        rwlock_t *lock;
        struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo;

        if (sk_unhashed(sk))
                goto out;

        if (sk->sk_state == TCP_LISTEN) {
                local_bh_disable();
                inet_listen_wlock(hashinfo);
                lock = &hashinfo->lhash_lock;
        } else {
                lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
                write_lock_bh(lock);
        }

        if (__sk_del_node_init(sk))
                sock_prot_inuse_add(sk->sk_prot, -1);
        write_unlock_bh(lock);
out:
        if (sk->sk_state == TCP_LISTEN)
                wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                struct sock *sk,
                int (*check_established)(struct inet_timewait_death_row *,
                        struct sock *, __u16, struct inet_timewait_sock **),
                void (*hash)(struct sock *sk))
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        const unsigned short snum = inet_sk(sk)->num;
        struct inet_bind_hashbucket *head;
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sk->sk_net;

        if (!snum) {
                int i, remaining, low, high, port;
                static u32 hint;
                u32 offset = hint + inet_sk_port_offset(sk);
                struct hlist_node *node;
                struct inet_timewait_sock *tw = NULL;

                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;

                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
                        head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        inet_bind_bucket_for_each(tb, node, &head->chain) {
                                if (tb->ib_net == net && tb->port == port) {
                                        BUG_TRAP(!hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
                                                goto next_port;
                                        if (!check_established(death_row, sk,
                                                               port, &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                                                     net, head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        goto ok;

                next_port:
                        spin_unlock(&head->lock);
                }
                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                hint += i;

                /* Head lock still held and bh's disabled */
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->sport = htons(port);
                        hash(sk);
                }
                spin_unlock(&head->lock);

                if (tw) {
                        inet_twsk_deschedule(tw, death_row);
                        inet_twsk_put(tw);
                }

                ret = 0;
                goto out;
        }

        head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                hash(sk);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = check_established(death_row, sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}

EXPORT_SYMBOL_GPL(__inet_hash_connect);
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
{
        return __inet_hash_connect(death_row, sk,
                        __inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
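
/*
 * Illustrative sketch, not part of this file: the connect() path, in the
 * style of tcp_v4_connect(), fills in the destination before asking
 * inet_hash_connect() to pick a source port (when inet->num is still 0),
 * check it against the established/TIME-WAIT hash and hash the socket.
 * example_connect_bind() is an invented name and error handling is trimmed.
 */
#if 0
static int example_connect_bind(struct sock *sk, __be32 daddr, __be16 dport)
{
        struct inet_sock *inet = inet_sk(sk);

        inet->daddr = daddr;    /* destination feeds the ehash function ...  */
        inet->dport = dport;    /* ... and the per-socket port randomisation */

        /* -EADDRNOTAVAIL here means every local port in the configured
         * range was tried and rejected. */
        return inet_hash_connect(&tcp_death_row, sk);
}
#endif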