// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <crypto/algapi.h>
#include <crypto/sha2.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"

#include <trace/events/mptcp.h>

static void mptcp_subflow_ops_undo_override(struct sock *ssk);

static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
				  enum linux_mptcp_mib_field field)
{
	MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
}

static void subflow_req_destructor(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	mptcp_token_destroy_request(req);
	tcp_request_sock_ops.destructor(req);
}

static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
				  void *hmac)
{
	u8 msg[8];

	put_unaligned_be32(nonce1, &msg[0]);
	put_unaligned_be32(nonce2, &msg[4]);

	mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
}

static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
{
	return mptcp_is_fully_established((void *)msk) &&
	       READ_ONCE(msk->pm.accept_subflow);
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
{
	struct mptcp_sock *msk = subflow_req->msk;
	u8 hmac[SHA256_DIGEST_SIZE];

	get_random_bytes(&subflow_req->local_nonce, sizeof(u32));

	subflow_generate_hmac(msk->local_key, msk->remote_key,
			      subflow_req->local_nonce,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);
}

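/* Note: only the first 64 bits of the SHA-256 HMAC fit in the SYN-ACK,
 * matching the truncated HMAC ("thmac") RFC 8684 mandates for MP_JOIN;
 * get_unaligned_be64() above keeps exactly those leading 8 bytes.
 */
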
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_sock *msk;
	int local_id;

	msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return NULL;
	}
	subflow_req->local_id = local_id;

	return msk;
}

static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
	subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
	subflow_req->msk = NULL;
	mptcp_token_init_request(req);
}

static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
}

static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
{
	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (mpext) {
		memset(mpext, 0, sizeof(*mpext));
		mpext->reset_reason = reason;
	}
}

/* Init mptcp request socket.
 *
 * Returns an error code if a JOIN has failed and a TCP reset
 * should be sent.
 */
static int subflow_check_req(struct request_sock *req,
			     const struct sock *sk_listener,
			     struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;

	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
		return -EINVAL;
#endif

	mptcp_get_options(sk_listener, skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);

		if (opt_mp_join)
			return 0;
	} else if (opt_mp_join) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
	}

	if (opt_mp_capable && listener->request_mptcp) {
		int err, retries = MPTCP_TOKEN_MAX_RETRIES;

		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
again:
		do {
			get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
		} while (subflow_req->local_key == 0);

		if (unlikely(req->syncookie)) {
			mptcp_crypto_key_sha(subflow_req->local_key,
					     &subflow_req->token,
					     &subflow_req->idsn);
			if (mptcp_token_exists(subflow_req->token)) {
				if (retries-- > 0)
					goto again;
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
			} else {
				subflow_req->mp_capable = 1;
			}
			return 0;
		}

		err = mptcp_token_new_request(req);
		if (err == 0)
			subflow_req->mp_capable = 1;
		else if (retries-- > 0)
			goto again;
		else
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);

	} else if (opt_mp_join && listener->request_mptcp) {
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
		subflow_req->mp_join = 1;
		subflow_req->backup = mp_opt.backup;
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		subflow_req->msk = subflow_token_join_request(req);

		/* Can't fall back to TCP in this case. */
		if (!subflow_req->msk) {
			subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
			return -EPERM;
		}

		if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
			pr_debug("syn inet_sport=%d %d",
				 ntohs(inet_sk(sk_listener)->inet_sport),
				 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
			if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
				return -EPERM;
			}
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
		}

		subflow_req_create_thmac(subflow_req);

		if (unlikely(req->syncookie)) {
			if (mptcp_can_accept_new_subflow(subflow_req->msk))
				subflow_init_req_cookie_join_save(subflow_req, skb);
			else
				return -EPERM;
		}

		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}

	return 0;
}

int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_options_received mp_opt;
	bool opt_mp_capable, opt_mp_join;
	int err;

	subflow_init_req(req, sk_listener);
	mptcp_get_options(sk_listener, skb, &mp_opt);

	opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
	opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
	if (opt_mp_capable && opt_mp_join)
		return -EINVAL;

	if (opt_mp_capable && listener->request_mptcp) {
		if (mp_opt.sndr_key == 0)
			return -EINVAL;

		subflow_req->local_key = mp_opt.rcvr_key;
		err = mptcp_token_new_request(req);
		if (err)
			return err;

		subflow_req->mp_capable = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	} else if (opt_mp_join && listener->request_mptcp) {
		if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
			return -EINVAL;

		subflow_req->mp_join = 1;
		subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);

static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp_request_sock_ops.send_reset(sk, skb);
	return NULL;
}

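/* The IPv6 variant below mirrors subflow_v4_route_req(): mark the request
 * as MPTCP, route via the plain TCP request ops, then tear everything down
 * with a reset if the MPTCP-level checks fail.
 */
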
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;
	int err;

	tcp_rsk(req)->is_mptcp = 1;
	subflow_init_req(req, sk);

	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	err = subflow_check_req(req, sk, skb);
	if (err == 0)
		return dst;

	dst_release(dst);
	if (!req->syncookie)
		tcp6_request_sock_ops.send_reset(sk, skb);
	return NULL;
}
#endif

/* validate received truncated hmac and create hmac for third ACK */
static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
{
	u8 hmac[SHA256_DIGEST_SIZE];
	u64 thmac;

	subflow_generate_hmac(subflow->remote_key, subflow->local_key,
			      subflow->remote_nonce, subflow->local_nonce,
			      hmac);

	thmac = get_unaligned_be64(hmac);
	pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
		 subflow, subflow->token,
		 (unsigned long long)thmac,
		 (unsigned long long)subflow->thmac);

	return thmac == subflow->thmac;
}

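/* Note: keys and nonces above are passed in the reverse order compared to
 * subflow_req_create_thmac(), as the thmac under validation was computed
 * by the peer from its own point of view.
 */
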
void mptcp_subflow_reset(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	/* must hold: tcp_done() could drop last reference on parent */
	sock_hold(sk);

	tcp_set_state(ssk, TCP_CLOSE);
	tcp_send_active_reset(ssk, GFP_ATOMIC);
	tcp_done(ssk);
	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
	    schedule_work(&mptcp_sk(sk)->work))
		return; /* worker will put sk for us */

	sock_put(sk);
}

static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
{
	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}

void __mptcp_set_connected(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_set_connected(sk);
	else
		set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

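/* When the msk socket lock is owned by user context, the state transition
 * is deferred via the MPTCP_CONNECTED flag and completed once the lock is
 * released, cfr. the msk release callback in protocol.c.
 */
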
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_options_received mp_opt;
	struct sock *parent = subflow->conn;

	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);

	/* be sure no special action on any packet other than syn-ack */
	if (subflow->conn_finished)
		return;

	mptcp_propagate_sndbuf(parent, sk);
	subflow->rel_write_seq = 1;
	subflow->conn_finished = 1;
	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);

	mptcp_get_options(sk, skb, &mp_opt);
	if (subflow->request_mptcp) {
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			MPTCP_INC_STATS(sock_net(sk),
					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
			mptcp_do_fallback(sk);
			pr_fallback(mptcp_sk(subflow->conn));
			goto fallback;
		}

		if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
			WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true);
		if (mp_opt.deny_join_id0)
			WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true);
		subflow->mp_capable = 1;
		subflow->can_ack = 1;
		subflow->remote_key = mp_opt.sndr_key;
		pr_debug("subflow=%p, remote_key=%llu", subflow,
			 subflow->remote_key);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
		mptcp_finish_connect(sk);
		mptcp_set_connected(parent);
	} else if (subflow->request_join) {
		u8 hmac[SHA256_DIGEST_SIZE];

		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) {
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		subflow->backup = mp_opt.backup;
		subflow->thmac = mp_opt.thmac;
		subflow->remote_nonce = mp_opt.nonce;
		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
			 subflow, subflow->thmac, subflow->remote_nonce,
			 subflow->backup);

		if (!subflow_thmac_valid(subflow)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
			subflow->reset_reason = MPTCP_RST_EMPTCP;
			goto do_reset;
		}

		if (!mptcp_finish_join(sk))
			goto do_reset;

		subflow_generate_hmac(subflow->local_key, subflow->remote_key,
				      subflow->local_nonce,
				      subflow->remote_nonce,
				      hmac);
		memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);

		subflow->mp_join = 1;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);

		if (subflow_use_different_dport(mptcp_sk(parent), sk)) {
			pr_debug("synack inet_dport=%d %d",
				 ntohs(inet_sk(sk)->inet_dport),
				 ntohs(inet_sk(parent)->inet_dport));
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
		}
	} else if (mptcp_check_fallback(sk)) {
fallback:
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		mptcp_set_connected(parent);
	}
	return;

do_reset:
	subflow->reset_transient = 0;
	mptcp_subflow_reset(sk);
}

struct request_sock_ops mptcp_subflow_request_sock_ops;
EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv4_ops,
				sk, skb);
drop:
	tcp_listendrop(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
static struct inet_connection_sock_af_ops subflow_v6_specific;
static struct inet_connection_sock_af_ops subflow_v6m_specific;
static struct proto tcpv6_prot_override;

static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow=%p", subflow);

	if (skb->protocol == htons(ETH_P_IP))
		return subflow_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
				&subflow_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
#endif

/* validate hmac received in third ACK */
static bool subflow_hmac_valid(const struct request_sock *req,
			       const struct mptcp_options_received *mp_opt)
{
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;

	subflow_req = mptcp_subflow_rsk(req);
	msk = subflow_req->msk;
	if (!msk)
		return false;

	subflow_generate_hmac(msk->remote_key, msk->local_key,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

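/* crypto_memneq() is used instead of memcmp() so that the HMAC comparison
 * runs in constant time and leaks no timing information about the
 * expected value.
 */
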
static void mptcp_sock_destruct(struct sock *sk)
{
	/* if new mptcp socket isn't accepted, it is free'd
	 * from the tcp listener sockets request queue, linked
	 * from req->sk.  The tcp socket is released.
	 * This calls the ULP release function which will
	 * also remove the mptcp socket, via
	 * sock_put(ctx->conn).
	 *
	 * Problem is that the mptcp socket will be in
	 * ESTABLISHED state and will not have the SOCK_DEAD flag.
	 * Both result in warnings from inet_sock_destruct.
	 */
	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		sk->sk_state = TCP_CLOSE;
		WARN_ON_ONCE(sk->sk_socket);
	}

	mptcp_destroy_common(mptcp_sk(sk));
	inet_sock_destruct(sk);
}

static void mptcp_force_close(struct sock *sk)
{
	/* the msk is not yet exposed to user-space */
	inet_sk_state_store(sk, TCP_CLOSE);
	sk_common_release(sk);
}

static void subflow_ulp_fallback(struct sock *sk,
				 struct mptcp_subflow_context *old_ctx)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_subflow_tcp_fallback(sk, old_ctx);
	icsk->icsk_ulp_ops = NULL;
	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	tcp_sk(sk)->is_mptcp = 0;

	mptcp_subflow_ops_undo_override(sk);
}

static void subflow_drop_ctx(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

	if (!ctx)
		return;

	subflow_ulp_fallback(ssk, ctx);
	if (ctx->conn)
		sock_put(ctx->conn);

	kfree_rcu(ctx, rcu);
}

void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
				     struct mptcp_options_received *mp_opt)
{
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	subflow->remote_key = mp_opt->sndr_key;
	subflow->fully_established = 1;
	subflow->can_ack = 1;
	WRITE_ONCE(msk->fully_established, true);
}

static struct sock *subflow_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* After child creation we must look for MPC even when options
	 * are not parsed
	 */
	mp_opt.suboptions = 0;

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	if (subflow_req->mp_capable) {
		/* we can receive and accept an in-window, out-of-order pkt,
		 * which may not carry the MP_CAPABLE opt even on mptcp enabled
		 * paths: always try to extract the peer key, and fallback
		 * for packets missing it.
		 * Even OoO DSS packets coming legitly after dropped or
		 * reordered MPC will cause fallback, but we don't have other
		 * options.
		 */
		mptcp_get_options(sk, skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
			fallback = true;
			goto create_child;
		}

		new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		mptcp_get_options(sk, skb, &mp_opt);
		if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
		    !subflow_hmac_valid(req, &mp_opt) ||
		    !mptcp_can_accept_new_subflow(subflow_req->msk)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			fallback = true;
		}
	}

create_child:
	child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						     req_unhash, own_req);

	if (child && *own_req) {
		struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);

		tcp_rsk(req)->drop_req = false;

		/* we need to fallback on ctx allocation failure and on pre-reqs
		 * checking above. In the latter scenario we additionally need
		 * to reset the context to non MPTCP status.
		 */
		if (!ctx || fallback) {
			if (fallback_is_fatal) {
				subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
				goto dispose_child;
			}

			subflow_drop_ctx(child);
			goto out;
		}

		/* ssk inherits options of listener sk */
		ctx->setsockopt_seq = listener->setsockopt_seq;

		if (ctx->mp_capable) {
			/* this can't race with mptcp_close(), as the msk is
			 * not yet exposted to user-space
			 */
			inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);

			/* record the newly created socket as the first msk
			 * subflow, but don't link it yet into conn_list
			 */
			WRITE_ONCE(mptcp_sk(new_msk)->first, child);

			/* new mpc subflow takes ownership of the newly
			 * created mptcp socket
			 */
			new_msk->sk_destruct = mptcp_sock_destruct;
			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
			ctx->conn = new_msk;
			new_msk = NULL;

			/* with OoO packets we can reach here without ingress
			 * mpc option
			 */
			if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
				mptcp_subflow_fully_established(ctx, &mp_opt);
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = subflow_req->msk;
			if (!owner) {
				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
				goto dispose_child;
			}

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;

			if (subflow_use_different_sport(owner, sk)) {
				pr_debug("ack inet_sport=%d %d",
					 ntohs(inet_sk(sk)->inet_sport),
					 ntohs(inet_sk((struct sock *)owner)->inet_sport));
				if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
					SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
					goto dispose_child;
				}
				SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
			}

			if (!mptcp_finish_join(child))
				goto dispose_child;

			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
			tcp_rsk(req)->drop_req = true;
		}
	}

out:
	/* dispose of the left over mptcp master, if any */
	if (unlikely(new_msk))
		mptcp_force_close(new_msk);

	/* check for expected invariant - should never trigger, just help
	 * catching earlier subtle bugs
	 */
	WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
		     (!mptcp_subflow_ctx(child) ||
		      !mptcp_subflow_ctx(child)->conn));
	return child;

dispose_child:
	subflow_drop_ctx(child);
	tcp_rsk(req)->drop_req = true;
	inet_csk_prepare_for_destroy_sock(child);
	tcp_done(child);
	req->rsk_ops->send_reset(sk, skb);

	/* The last child reference will be released by the caller */
	return child;
}

static struct inet_connection_sock_af_ops subflow_specific;
static struct proto tcp_prot_override;

enum mapping_status {
	MAPPING_OK,
	MAPPING_INVALID,
	MAPPING_EMPTY,
	MAPPING_DATA_FIN,
	MAPPING_DUMMY
};

static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
}

static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	unsigned int skb_consumed;

	skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
	if (WARN_ON_ONCE(skb_consumed >= skb->len))
		return true;

	return skb->len - skb_consumed <= subflow->map_data_len -
					  mptcp_subflow_get_map_offset(subflow);
}

static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

	if (unlikely(before(ssn, subflow->map_subflow_seq))) {
		/* Mapping covers data later in the subflow stream,
		 * currently unsupported.
		 */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	if (unlikely(!before(ssn, subflow->map_subflow_seq +
				  subflow->map_data_len))) {
		/* Mapping covers past subflow data, invalid */
		dbg_bad_map(subflow, ssn);
		return false;
	}
	return true;
}

static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
					      bool csum_reqd)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct csum_pseudo_header header;
	u32 offset, seq, delta;
	__wsum csum;

	if (!csum_reqd)
		return MAPPING_OK;

	/* mapping already validated on previous traversal */
	if (subflow->map_csum_len == subflow->map_data_len)
		return MAPPING_OK;

	/* traverse the receive queue, ensuring it contains a full
	 * DSS mapping and accumulating the related csum.
	 * Preserve the accumulated csum across multiple calls, to compute
	 * the csum only once
	 */
	delta = subflow->map_data_len - subflow->map_csum_len;
	for (;;) {
		seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
		offset = seq - TCP_SKB_CB(skb)->seq;

		/* if the current skb has not been accounted yet, csum its contents
		 * up to the amount covered by the current DSS
		 */
		if (offset < skb->len) {
			__wsum csum;
			int len;

			len = min(skb->len - offset, delta);
			csum = skb_checksum(skb, offset, len, 0);
			subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
								subflow->map_csum_len);

			delta -= len;
			subflow->map_csum_len += len;
		}
		if (delta == 0)
			break;

		if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
			/* if this subflow is closed, the partial mapping
			 * will be never completed; flush the pending skbs, so
			 * that subflow_sched_work_if_closed() can kick in
			 */
			if (unlikely(ssk->sk_state == TCP_CLOSE))
				while ((skb = skb_peek(&ssk->sk_receive_queue)))
					sk_eat_skb(ssk, skb);

			/* not enough data to validate the csum */
			return MAPPING_EMPTY;
		}

		/* the DSS mapping for next skbs will be validated later,
		 * when a get_mapping_status call will process such skb
		 */
		skb = skb->next;
	}

	/* note that 'map_data_len' accounts only for the carried data, does
	 * not include the eventual seq increment due to the data fin,
	 * while the pseudo header requires the original DSS data len,
	 * including that
	 */
	header.data_seq = cpu_to_be64(subflow->map_seq);
	header.subflow_seq = htonl(subflow->map_subflow_seq);
	header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
	header.csum = 0;

	csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
	if (unlikely(csum_fold(csum))) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
		subflow->send_mp_fail = 1;
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
		return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
	}

	return MAPPING_OK;
}

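/* The pseudo-header folded into the checksum above (data_seq, subflow_seq,
 * data_len, zeroed csum field) follows the DSS checksum definition of
 * RFC 8684 section 3.3, so it must stay in sync with the sender side.
 */
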
static enum mapping_status get_mapping_status(struct sock *ssk,
					      struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool csum_reqd = READ_ONCE(msk->csum_enabled);
	struct mptcp_ext *mpext;
	struct sk_buff *skb;
	u16 data_len;
	u64 map_seq;

	skb = skb_peek(&ssk->sk_receive_queue);
	if (!skb)
		return MAPPING_EMPTY;

	if (mptcp_check_fallback(ssk))
		return MAPPING_DUMMY;

	mpext = mptcp_get_ext(skb);
	if (!mpext || !mpext->use_map) {
		if (!subflow->map_valid && !skb->len) {
			/* the TCP stack deliver 0 len FIN pkt to the receive
			 * queue, that is the only 0len pkts ever expected here,
			 * and we can admit no mapping only for 0 len pkts
			 */
			if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
				WARN_ONCE(1, "0len seq %d:%d flags %x",
					  TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  TCP_SKB_CB(skb)->tcp_flags);
			sk_eat_skb(ssk, skb);
			return MAPPING_EMPTY;
		}

		if (!subflow->map_valid)
			return MAPPING_INVALID;

		goto validate_seq;
	}

	trace_get_mapping_status(mpext);

	data_len = mpext->data_len;
	if (data_len == 0) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
		return MAPPING_INVALID;
	}

	if (mpext->data_fin == 1) {
		if (data_len == 1) {
			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
								 mpext->dsn64);
			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
			if (subflow->map_valid) {
				/* A DATA_FIN might arrive in a DSS
				 * option before the previous mapping
				 * has been fully consumed. Continue
				 * handling the existing mapping.
				 */
				skb_ext_del(skb, SKB_EXT_MPTCP);
				return MAPPING_OK;
			}

			if (updated && schedule_work(&msk->work))
				sock_hold((struct sock *)msk);

			return MAPPING_DATA_FIN;
		} else {
			u64 data_fin_seq = mpext->data_seq + data_len - 1;

			/* If mpext->data_seq is a 32-bit value, data_fin_seq
			 * must also be limited to 32 bits.
			 */
			if (!mpext->dsn64)
				data_fin_seq &= GENMASK_ULL(31, 0);

			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
				 data_fin_seq, mpext->dsn64);
		}

		/* Adjust for DATA_FIN using 1 byte of sequence space */
		data_len--;
	}

	map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
	WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);

	if (subflow->map_valid) {
		/* Allow replacing only with an identical map */
		if (subflow->map_seq == map_seq &&
		    subflow->map_subflow_seq == mpext->subflow_seq &&
		    subflow->map_data_len == data_len &&
		    subflow->map_csum_reqd == mpext->csum_reqd) {
			skb_ext_del(skb, SKB_EXT_MPTCP);
			goto validate_csum;
		}

		/* If this skb data are fully covered by the current mapping,
		 * the new map would need caching, which is not supported
		 */
		if (skb_is_fully_mapped(ssk, skb)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
			return MAPPING_INVALID;
		}

		/* will validate the next map after consuming the current one */
		goto validate_csum;
	}

	subflow->map_seq = map_seq;
	subflow->map_subflow_seq = mpext->subflow_seq;
	subflow->map_data_len = data_len;
	subflow->map_valid = 1;
	subflow->map_data_fin = mpext->data_fin;
	subflow->mpc_map = mpext->mpc_map;
	subflow->map_csum_reqd = mpext->csum_reqd;
	subflow->map_csum_len = 0;
	subflow->map_data_csum = csum_unfold(mpext->csum);

	/* Cfr RFC 8684 Section 3.3.0 */
	if (unlikely(subflow->map_csum_reqd != csum_reqd))
		return MAPPING_INVALID;

	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
		 subflow->map_seq, subflow->map_subflow_seq,
		 subflow->map_data_len, subflow->map_csum_reqd,
		 subflow->map_data_csum);

validate_seq:
	/* we revalidate valid mapping on new skb, because we must ensure
	 * the current skb is completely covered by the available mapping
	 */
	if (!validate_mapping(ssk, skb)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
		return MAPPING_INVALID;
	}

	skb_ext_del(skb, SKB_EXT_MPTCP);

validate_csum:
	return validate_data_csum(ssk, skb, csum_reqd);
}

static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
				       u64 limit)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
	u32 incr;

	incr = limit >= skb->len ? skb->len + fin : limit;

	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
		 subflow->map_subflow_seq);
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
	tcp_sk(ssk)->copied_seq += incr;
	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
		sk_eat_skb(ssk, skb);
	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
		subflow->map_valid = 0;
}

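/* Note: 'fin' above accounts for the extra unit of sequence space a FIN
 * consumes, so copied_seq can step past it when the whole skb is dropped.
 */
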
/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;

	if (likely(ssk->sk_state != TCP_CLOSE))
		return;

	if (skb_queue_empty(&ssk->sk_receive_queue) &&
	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
		sock_hold(sk);
		if (!schedule_work(&msk->work))
			sock_put(sk);
	}
}

static bool subflow_check_data_avail(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	enum mapping_status status;
	struct mptcp_sock *msk;
	struct sk_buff *skb;

	if (!skb_peek(&ssk->sk_receive_queue))
		WRITE_ONCE(subflow->data_avail, 0);
	if (subflow->data_avail)
		return true;

	msk = mptcp_sk(subflow->conn);
	for (;;) {
		u64 ack_seq;
		u64 old_ack;

		status = get_mapping_status(ssk, msk);
		trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
		if (unlikely(status == MAPPING_INVALID))
			goto fallback;

		if (unlikely(status == MAPPING_DUMMY))
			goto fallback;

		if (status != MAPPING_OK)
			goto no_data;

		skb = skb_peek(&ssk->sk_receive_queue);
		if (WARN_ON_ONCE(!skb))
			goto no_data;

		/* if msk lacks the remote key, this subflow must provide an
		 * MP_CAPABLE-based mapping
		 */
		if (unlikely(!READ_ONCE(msk->can_ack))) {
			if (!subflow->mpc_map)
				goto fallback;
			WRITE_ONCE(msk->remote_key, subflow->remote_key);
			WRITE_ONCE(msk->ack_seq, subflow->map_seq);
			WRITE_ONCE(msk->can_ack, true);
		}

		old_ack = READ_ONCE(msk->ack_seq);
		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
			 ack_seq);
		if (unlikely(before64(ack_seq, old_ack))) {
			mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
			continue;
		}

		WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
		break;
	}
	return true;

no_data:
	subflow_sched_work_if_closed(msk, ssk);
	return false;

fallback:
	/* RFC 8684 section 3.7. */
	if (subflow->send_mp_fail) {
		if (mptcp_has_another_subflow(ssk)) {
			while ((skb = skb_peek(&ssk->sk_receive_queue)))
				sk_eat_skb(ssk, skb);
		}
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return true;
	}

	if (subflow->mp_join || subflow->fully_established) {
		/* fatal protocol error, close the socket.
		 * subflow_error_report() will introduce the appropriate barriers
		 */
		ssk->sk_err = EBADMSG;
		tcp_set_state(ssk, TCP_CLOSE);
		subflow->reset_transient = 0;
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		tcp_send_active_reset(ssk, GFP_ATOMIC);
		WRITE_ONCE(subflow->data_avail, 0);
		return false;
	}

	__mptcp_do_fallback(msk);
	skb = skb_peek(&ssk->sk_receive_queue);
	subflow->map_valid = 1;
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	subflow->map_data_len = skb->len;
	subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
	WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
	return true;
}

bool mptcp_subflow_data_available(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	/* check if current mapping is still valid */
	if (subflow->map_valid &&
	    mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
		subflow->map_valid = 0;
		WRITE_ONCE(subflow->data_avail, 0);

		pr_debug("Done with mapping: seq=%u data_len=%u",
			 subflow->map_subflow_seq,
			 subflow->map_data_len);
	}

	return subflow_check_data_avail(sk);
}

/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
 * not the ssk one.
 *
 * In mptcp, rwin is about the mptcp-level connection data.
 *
 * Data that is still on the ssk rx queue can thus be ignored,
 * as far as mptcp peer is concerned that data is still inflight.
 * DSS ACK is updated when skb is moved to the mptcp rx queue.
 */
void mptcp_space(const struct sock *ssk, int *space, int *full_space)
{
	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	const struct sock *sk = subflow->conn;

	*space = __mptcp_space(sk);
	*full_space = tcp_full_space(sk);
}

void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int err = sock_error(ssk);

		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		inet_sk_state_store(sk, inet_sk_state_load(ssk));
		sk->sk_err = -err;

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_error_report(sk);
	else
		set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

static void subflow_data_ready(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	u16 state = 1 << inet_sk_state_load(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(parent);
	if (state & TCPF_LISTEN) {
		/* MPJ subflow are removed from accept queue before reaching here,
		 * avoid stray wakeups
		 */
		if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
			return;

		set_bit(MPTCP_DATA_READY, &msk->flags);
		parent->sk_data_ready(parent);
		return;
	}

	WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
		     !subflow->mp_join && !(state & TCPF_CLOSE));

	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);
}

static void subflow_write_space(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;

	mptcp_propagate_sndbuf(sk, ssk);
	mptcp_write_space(sk);
}

static struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_family == AF_INET6)
		return &subflow_v6_specific;
#endif
	return &subflow_specific;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_connection_sock_af_ops *target;

	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);

	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);

	if (likely(icsk->icsk_af_ops == target))
		return;

	subflow->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = target;
}
#endif

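/* A v4-mapped-v6 subflow carries its payload over IPv4, so
 * subflow_v6m_specific reuses the IPv4 xmit hooks, cfr. the ops setup in
 * mptcp_subflow_init() below.
 */
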
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
			 struct sockaddr_storage *addr,
			 unsigned short family)
{
	memset(addr, 0, sizeof(*addr));
	addr->ss_family = family;
	if (addr->ss_family == AF_INET) {
		struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;

		if (info->family == AF_INET)
			in_addr->sin_addr = info->addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		else if (ipv6_addr_v4mapped(&info->addr6))
			in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
#endif
		in_addr->sin_port = info->port;
	}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (addr->ss_family == AF_INET6) {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;

		if (info->family == AF_INET)
			ipv6_addr_set_v4mapped(info->addr.s_addr,
					       &in6_addr->sin6_addr);
		else
			in6_addr->sin6_addr = info->addr6;
		in6_addr->sin6_port = info->port;
	}
#endif
}

int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
			    const struct mptcp_addr_info *remote)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct sockaddr_storage addr;
	int remote_id = remote->id;
	int local_id = loc->id;
	struct socket *sf;
	struct sock *ssk;
	u32 remote_token;
	int addrlen;
	int ifindex;
	u8 flags;
	int err;

	if (!mptcp_is_fully_established(sk))
		return -ENOTCONN;

	err = mptcp_subflow_create_socket(sk, &sf);
	if (err)
		return err;

	ssk = sf->sk;
	subflow = mptcp_subflow_ctx(ssk);
	do {
		get_random_bytes(&subflow->local_nonce, sizeof(u32));
	} while (!subflow->local_nonce);

	if (!local_id) {
		err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
		if (err < 0)
			goto failed;

		local_id = err;
	}

	mptcp_pm_get_flags_and_ifindex_by_id(sock_net(sk), local_id,
					     &flags, &ifindex);
	subflow->remote_key = msk->remote_key;
	subflow->local_key = msk->local_key;
	subflow->token = msk->token;
	mptcp_info2sockaddr(loc, &addr, ssk->sk_family);

	addrlen = sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (addr.ss_family == AF_INET6)
		addrlen = sizeof(struct sockaddr_in6);
#endif
	ssk->sk_bound_dev_if = ifindex;
	err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
	if (err)
		goto failed;

	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
		 remote_token, local_id, remote_id);
	subflow->remote_token = remote_token;
	subflow->local_id = local_id;
	subflow->remote_id = remote_id;
	subflow->request_join = 1;
	subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
	mptcp_info2sockaddr(remote, &addr, ssk->sk_family);

	mptcp_add_pending_subflow(msk, subflow);
	mptcp_sockopt_sync(msk, ssk);
	err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
	if (err && err != -EINPROGRESS)
		goto failed_unlink;

	/* discard the subflow socket */
	mptcp_sock_graft(ssk, sk->sk_socket);
	iput(SOCK_INODE(sf));
	return err;

failed_unlink:
	spin_lock_bh(&msk->join_list_lock);
	list_del(&subflow->node);
	spin_unlock_bh(&msk->join_list_lock);
	sock_put(mptcp_subflow_tcp_sock(subflow));

failed:
	subflow->disposable = 1;
	sock_release(sf);
	return err;
}

static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
{
#ifdef CONFIG_SOCK_CGROUP_DATA
	struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
				*child_skcd = &child->sk_cgrp_data;

	/* only the additional subflows created by kworkers have to be modified */
	if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
	    cgroup_id(sock_cgroup_ptr(child_skcd))) {
#ifdef CONFIG_MEMCG
		struct mem_cgroup *memcg = parent->sk_memcg;

		mem_cgroup_sk_free(child);
		if (memcg && css_tryget(&memcg->css))
			child->sk_memcg = memcg;
#endif /* CONFIG_MEMCG */

		cgroup_sk_free(child_skcd);
		*child_skcd = *parent_skcd;
		cgroup_sk_clone(child_skcd);
	}
#endif /* CONFIG_SOCK_CGROUP_DATA */
}

static void mptcp_subflow_ops_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot)
		ssk->sk_prot = &tcpv6_prot_override;
	else
#endif
		ssk->sk_prot = &tcp_prot_override;
}

static void mptcp_subflow_ops_undo_override(struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (ssk->sk_prot == &tcpv6_prot_override)
		ssk->sk_prot = &tcpv6_prot;
	else
#endif
		ssk->sk_prot = &tcp_prot;
}

int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
	struct mptcp_subflow_context *subflow;
	struct net *net = sock_net(sk);
	struct socket *sf;
	int err;

	/* un-accepted server sockets can reach here - on bad configuration
	 * bail early to avoid greater trouble later
	 */
	if (unlikely(!sk->sk_socket))
		return -EINVAL;

	err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
			       &sf);
	if (err)
		return err;

	lock_sock(sf->sk);

	/* the newly created socket has to be in the same cgroup as its parent */
	mptcp_attach_cgroup(sk, sf->sk);

	/* kernel sockets do not by default acquire net ref, but TCP timer
	 * needs it.
	 */
	sf->sk->sk_net_refcnt = 1;
	get_net(net);
#ifdef CONFIG_PROC_FS
	this_cpu_add(*net->core.sock_inuse, 1);
#endif
	err = tcp_set_ulp(sf->sk, "mptcp");
	release_sock(sf->sk);

	if (err) {
		sock_release(sf);
		return err;
	}

	/* the newly created socket really belongs to the owning MPTCP master
	 * socket, even if for additional subflows the allocation is performed
	 * by a kernel workqueue. Adjust inode references, so that the
	 * procfs/diag interfaces really show this one belonging to the correct
	 * owner
	 */
	SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
	SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;

	subflow = mptcp_subflow_ctx(sf->sk);
	pr_debug("subflow=%p", subflow);

	*new_sock = sf;
	sock_hold(sk);
	subflow->conn = sk;
	mptcp_subflow_ops_override(sf->sk);

	return 0;
}

*subflow_create_ctx(struct sock
*sk
,
1572 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1573 struct mptcp_subflow_context
*ctx
;
1575 ctx
= kzalloc(sizeof(*ctx
), priority
);
1579 rcu_assign_pointer(icsk
->icsk_ulp_data
, ctx
);
1580 INIT_LIST_HEAD(&ctx
->node
);
1581 INIT_LIST_HEAD(&ctx
->delegated_node
);
1583 pr_debug("subflow=%p", ctx
);
static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;

	__subflow_state_change(sk);

	if (subflow_simultaneous_connect(sk)) {
		mptcp_propagate_sndbuf(parent, sk);
		mptcp_do_fallback(sk);
		mptcp_rcv_space_init(mptcp_sk(parent), sk);
		pr_fallback(mptcp_sk(parent));
		subflow->conn_finished = 1;
		mptcp_set_connected(parent);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection
	 * a fin packet carrying a DSS can be unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
	    !subflow->rx_eof && subflow_is_done(sk)) {
		subflow->rx_eof = 1;
		mptcp_subflow_eof(parent);
	}
}

static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_data_ready = sk->sk_data_ready;
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_write_space = sk->sk_write_space;
	ctx->tcp_error_report = sk->sk_error_report;
	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

*ssk
)
1680 struct mptcp_subflow_context
*ctx
= mptcp_subflow_ctx(ssk
);
1681 bool release
= true;
1689 /* if the msk has been orphaned, keep the ctx
1690 * alive, will be freed by __mptcp_close_ssk(),
1691 * when the subflow is still unaccepted
1693 release
= ctx
->disposable
|| list_empty(&ctx
->node
);
1697 mptcp_subflow_ops_undo_override(ssk
);
1699 kfree_rcu(ctx
, rcu
);
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_write_space = old_ctx->tcp_write_space;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->local_id = subflow_req->local_id;
		new_ctx->remote_id = subflow_req->remote_id;
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;
	}
}

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

= {
1764 .owner
= THIS_MODULE
,
1765 .init
= subflow_ulp_init
,
1766 .release
= subflow_ulp_release
,
1767 .clone
= subflow_ulp_clone
,
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
	subflow_ops->slab_name = "request_sock_subflow";

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	subflow_ops->destructor = subflow_req_destructor;

	return 0;
}

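/* Note: obj_size is enlarged because mptcp_subflow_request_sock wraps a
 * plain tcp_request_sock as its first field (cfr. protocol.h), so the TCP
 * code keeps handling these request socks transparently.
 */
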
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.net_frag_header_len = 0;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}