/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#if IS_ENABLED(CONFIG_IPV6)
/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
 *                          only, and any IPv4 addresses if not IPv6 only
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *                          and 0.0.0.0 equals to 0.0.0.0 only
 */
static int ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				const struct in6_addr *sk2_rcv_saddr6,
				__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				bool sk1_ipv6only, bool sk2_ipv6only,
				bool match_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			if (!sk1_rcv_saddr || !sk2_rcv_saddr)
				return match_wildcard;
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif
/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
static int ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				bool sk2_ipv6only, bool match_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		if (!sk1_rcv_saddr || !sk2_rcv_saddr)
			return match_wildcard;
	}
	return false;
}
int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			 bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);
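
/* In effect: with match_wildcard == true a socket bound to the wildcard
 * address (0.0.0.0 or ::) overlaps any specific address on the same port,
 * e.g. binds to 0.0.0.0:80 and 192.0.2.1:80 compare as equal for conflict
 * purposes; with match_wildcard == false only literally identical addresses
 * compare as equal.  This is the single overlap test used by the
 * bind-conflict checks below.
 */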
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
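
/* The local port range is read under the ip_local_ports seqlock: the loop
 * above simply retries if a concurrent write (e.g. to
 * /proc/sys/net/ipv4/ip_local_port_range) changed the values mid-read, so
 * callers always see a consistent {low, high} pair without blocking.
 */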
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  bool relax, bool reuseport_ok)
{
	struct sock *sk2;
	bool reuse = sk->sk_reuse;
	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    rcu_access_pointer(sk->sk_reuseport_cb) ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(uid, sock_i_uid(sk2))))) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
		}
	}
	return sk2 != NULL;
}
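
/* Roughly: a socket already bound to this port conflicts with @sk when both
 * could receive the same traffic - same (or unset) bound device and
 * overlapping receive addresses - unless both set SO_REUSEADDR and the
 * existing socket is not listening, or both set SO_REUSEPORT with the same
 * owning uid.  With @relax false even the SO_REUSEADDR exception is ignored,
 * which is the stricter mode used while probing candidate ports below.
 */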
/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket lock held.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int port = 0;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;

	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = prandom_u32() % remaining;
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (!inet_csk_bind_conflict(sk, tb, false, false))
					goto success;
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	return head;
}
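
/* The scan above walks the port range in steps of two, so autobind only
 * proposes ports of one parity; __inet_hash_connect() prefers the other
 * parity for outgoing connect()s, which keeps listeners and ephemeral ports
 * from competing for the same numbers.  With SK_CAN_REUSE the lower half of
 * the range is tried first and the upper half only as a fallback.
 */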
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    &sk->sk_v6_rcv_saddr,
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true);
}
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, port = snum;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb = NULL;
	kuid_t uid = sock_i_uid(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &port);
		if (!head)
			return ret;
		if (!tb)
			goto tb_not_found;
		goto success;
	}
	head = &hinfo->bhash[inet_bhashfn(net, port,
					  hinfo->bhash_size)];
	spin_lock_bh(&head->lock);
	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->port == port)
			goto tb_found;
tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if ((tb->fastreuse > 0 && reuse) ||
		    sk_reuseport_match(tb, sk))
			goto success;
		if (inet_csk_bind_conflict(sk, tb, true, true))
			goto fail_unlock;
	}
success:
	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
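
/* Typical flow: bind() with an explicit port hashes (net, port) into bhash,
 * creates the inet_bind_bucket on first use and runs the conflict checks
 * above under the bucket lock; bind() with port 0 goes through
 * inet_csk_find_open_port() first and then joins the bucket it returned.
 * The fastreuse/fastreuseport fields cache whether the current owners of a
 * bucket all allow SO_REUSEADDR/SO_REUSEPORT sharing, letting the common
 * reuseport case skip a full conflict scan.
 */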
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
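
/* Note on TCP Fast Open: for a TFO request the final ACK of the handshake
 * may still be outstanding when accept() runs, so the child socket is handed
 * to user space while req->sk is cleared instead of freeing the request;
 * reqsk_fastopen_remove() disposes of it once the 3WHS completes or is
 * aborted.
 */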
/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish use just one timer maintaining a list of expire jiffies
 * to use.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
		    (unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
		    (unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
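
/* A connection-oriented socket keeps three timers: the retransmit and
 * delayed-ACK timers live in the inet_connection_sock, while sk->sk_timer
 * doubles as the keepalive timer.  The helpers above only arm and disarm
 * them; the protocol (e.g. TCP) supplies the handler functions via
 * inet_csk_init_xmit_timers().
 */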
struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt = ireq->opt;
	struct rtable *rt;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
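
/* The SYN-ACK route is built from the request_sock alone (no full socket
 * exists yet): the flow takes its addresses, ports and mark from the
 * request, honours an IP source-route option if present, and the lookup is
 * abandoned when a strict source route cannot reach the next hop directly.
 */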
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
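
/* With TCP_DEFER_ACCEPT an already-ACKed request is deliberately kept in the
 * queue without waking accept(): it only expires once it has exceeded both
 * the normal SYN-ACK retry threshold and the defer-accept period, and
 * SYN-ACK retransmits are held back until that period is about to end.
 */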
/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}
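
/* Each pending request re-arms its own rsk_timer with exponential backoff
 * (TCP_TIMEOUT_INIT << num_timeout, capped at TCP_RTO_MAX).  When more than
 * half of the accept backlog is occupied by old entries the retry threshold
 * above shrinks, so stale embryonic connections are dropped early instead of
 * letting a SYN flood crowd out fresh handshakes.
 */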
static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
			   (unsigned long)req);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
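
/* The clone starts out in TCP_SYN_RECV with no bind bucket of its own; the
 * protocol attaches it to one later, typically by inheriting the listener's
 * port.  Only listeners carry SOCK_RCU_FREE, so the flag is cleared on the
 * child, and the accept queue is wiped so that any listener-only access
 * through the clone traps instead of corrupting the parent's queue.
 */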
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
int inet_csk_listen_start(struct sock *sk, int backlog)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = -EADDRINUSE;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk_state_store(sk, TCP_LISTEN);
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	sk->sk_state = TCP_CLOSE;
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
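
/* listen() thus publishes TCP_LISTEN before the port is finally validated.
 * That is safe because the socket is only inserted into the listener hash by
 * ->hash() after get_port() succeeded, and the state is rolled back to
 * TCP_CLOSE on failure.
 */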
static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		tcp_sk(child)->fastopen_rsk = NULL;
	}
	inet_csk_destroy_sock(child);
}
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_head = req;
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(sk, req);
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;
	}
	/* Too bad, another child took ownership of the request, undo. */
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);
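
/* inet_csk_complete_hashdance() finishes the mini-socket hand-over: only the
 * caller that owns the request (own_req) may unlink it from the ehash,
 * account it as removed and queue the child for accept(); a loser of that
 * race simply unlocks and releases the child it created.
 */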
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		inet_child_forget(sk, req, child);
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
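
/* A PMTU update can arrive while the socket has no cached route (for
 * instance after a routing change invalidated it); the route is then rebuilt
 * from the socket's flow before, and if necessary after, the ->update_pmtu()
 * call so the new path MTU is always applied to a valid dst.
 */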