/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
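
/* Usage sketch (illustrative, not part of this file): a caller that wants
 * a random port inside the sysctl range reads both bounds in one
 * seqlock-consistent snapshot and derives the search window from them:
 *
 *	int low, high, remaining;
 *
 *	inet_get_local_port_range(&low, &high);
 *	remaining = (high - low) + 1;	// ports available to probe
 *
 * This is exactly how inet_csk_get_port() below seeds its port search.
 */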
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
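
/* Userspace view (hypothetical demo, not part of this file): the rules
 * above are what make the classic SO_REUSEADDR idiom work - a new bind
 * is allowed when both the new socket and every current owner of the
 * port set sk_reuse and none of the owners is in TCP_LISTEN:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	// bind() can now succeed while old connections on the same
 *	// port linger in TIME_WAIT, provided they set SO_REUSEADDR too.
 */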
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
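
/* Userspace view (hypothetical demo, not part of this file): binding
 * port 0 is what drives the search above; getsockname() then reveals
 * which port inet_csk_get_port() picked:
 *
 *	struct sockaddr_in a = { .sin_family = AF_INET };
 *	socklen_t len = sizeof(a);
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// sin_port == 0
 *	getsockname(fd, (struct sockaddr *)&a, &len);
 *	// ntohs(a.sin_port) now holds a port from the sysctl range
 */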
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
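
/* Userspace view (hypothetical demo, not part of this file): with
 * O_NONBLOCK set, sock_rcvtimeo() above returns 0 and accept() fails
 * immediately with EAGAIN instead of sleeping in
 * inet_csk_wait_for_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
 *	// ... bind(), listen() ...
 *	int c = accept(fd, NULL, NULL);
 *	if (c < 0 && errno == EAGAIN)
 *		;	// nothing queued yet - poll() and retry
 */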
/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
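
/* Note the `& (synq_hsize - 1)` mask assumes synq_hsize is a power of
 * two (reqsk_queue_alloc() rounds the table size up to one). Worked
 * example: with synq_hsize == 512 the mask is 0x1ff, so only the low
 * 9 bits of the jhash value select a syn_table bucket.
 */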
#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
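
/* Userspace view (hypothetical demo, not part of this file): the
 * rskq_defer_accept path above is driven by TCP_DEFER_ACCEPT - the
 * listener asks not to be handed the connection until data arrives
 * (or the deferring period runs out):
 *
 *	int secs = 5;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
 *		   &secs, sizeof(secs));
 *	// accept() now tends to return connections that already carry
 *	// data; bare ACKs wait in the syn table in the meantime.
 */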
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
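
/* Worked example for the thresh adjustment above (illustrative numbers,
 * assuming the default of 5 SYN-ACK retries): with max_qlen_log == 10
 * pruning arms once qlen >> 9 is non-zero, i.e. the table is half full.
 * If qlen == 600 and qlen_young == 100, young starts at 200 and doubles
 * each step: 600 >= 200 -> thresh 4; 600 >= 400 -> thresh 3; 600 < 800
 * -> stop. Old entries thus get only ~3 retransmit rounds before being
 * dropped, keeping half the table free for young embryos.
 */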
/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0,
		       sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
/* This function allows us to force the closure of a socket after the call
 * to tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
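
/* Userspace view (hypothetical demo, not part of this file): listen()
 * is what lands here; its backlog sizes the request_sock queue that
 * reqsk_queue_alloc() sets up above:
 *
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(8080),
 *	};
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	listen(fd, 128);	// backlog -> nr_table_entries, clamped
 *				// by net.core.somaxconn and friends
 */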
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
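
/* Userspace view (hypothetical demo, not part of this file): the
 * sockaddr filled in above is the remote end of the connection, i.e.
 * what getpeername() reports for a connected socket:
 *
 *	struct sockaddr_in peer;
 *	socklen_t len = sizeof(peer);
 *
 *	getpeername(fd, (struct sockaddr *)&peer, &len);
 *	// peer.sin_addr/sin_port mirror inet_daddr/inet_dport above
 */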
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);