Commit | Line | Data |
---|---|---|
f870fa0b MM |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2017 - 2019, Intel Corporation. | |
5 | */ | |
6 | ||
7 | #define pr_fmt(fmt) "MPTCP: " fmt | |
8 | ||
9 | #include <linux/kernel.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
7a6a6cbc PA |
12 | #include <linux/sched/signal.h> |
13 | #include <linux/atomic.h> | |
e4fae864 | 14 | #include <linux/igmp.h> |
f870fa0b MM |
15 | #include <net/sock.h> |
16 | #include <net/inet_common.h> | |
17 | #include <net/inet_hashtables.h> | |
18 | #include <net/protocol.h> | |
19 | #include <net/tcp.h> | |
3721b9b6 | 20 | #include <net/tcp_states.h> |
cf7da0d6 PK |
21 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
22 | #include <net/transp_v6.h> | |
e4fae864 | 23 | #include <net/addrconf.h> |
cf7da0d6 | 24 | #endif |
f870fa0b | 25 | #include <net/mptcp.h> |
e16163b6 | 26 | #include <net/xfrm.h> |
f870fa0b | 27 | #include "protocol.h" |
fc518953 | 28 | #include "mib.h" |
f870fa0b | 29 | |
b0519de8 FW |
30 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
31 | struct mptcp6_sock { | |
32 | struct mptcp_sock msk; | |
33 | struct ipv6_pinfo np; | |
34 | }; | |
35 | #endif | |
36 | ||
6771bfd9 | 37 | struct mptcp_skb_cb { |
ab174ad8 PA |
38 | u64 map_seq; |
39 | u64 end_seq; | |
6771bfd9 FW |
40 | u32 offset; |
41 | }; | |
42 | ||
43 | #define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0])) | |
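/* Per-skb MPTCP receive metadata (DSS mapping sequence, mapping end and
 * payload offset), stored in the skb control buffer like TCP_SKB_CB().
 */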
44 | ||
d027236c PA |
45 | static struct percpu_counter mptcp_sockets_allocated; |
46 | ||
e16163b6 | 47 | static void __mptcp_destroy_sock(struct sock *sk); |
d9ca1de8 | 48 | static void __mptcp_check_send_data_fin(struct sock *sk); |
e16163b6 | 49 | |
2303f994 PK |
50 | /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not |
51 | * completed yet or has failed, return the subflow socket. | |
52 | * Otherwise return NULL. | |
53 | */ | |
54 | static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk) | |
55 | { | |
d22f4988 | 56 | if (!msk->subflow || READ_ONCE(msk->can_ack)) |
2303f994 PK |
57 | return NULL; |
58 | ||
59 | return msk->subflow; | |
60 | } | |
61 | ||
6f8a612a FW |
62 | /* Returns end sequence number of the receiver's advertised window */ |
63 | static u64 mptcp_wnd_end(const struct mptcp_sock *msk) | |
64 | { | |
7439d687 | 65 | return READ_ONCE(msk->wnd_end); |
6f8a612a FW |
66 | } |
67 | ||
d2f77c53 | 68 | static bool mptcp_is_tcpsk(struct sock *sk) |
0b4f33de FW |
69 | { |
70 | struct socket *sock = sk->sk_socket; | |
71 | ||
0b4f33de FW |
72 | if (unlikely(sk->sk_prot == &tcp_prot)) { |
73 | /* we are being invoked after mptcp_accept() has | |
74 | * accepted a non-mp-capable flow: sk is a tcp_sk, | |
75 | * not an mptcp one. | |
76 | * | |
77 | * Hand the socket over to tcp so all further socket ops | |
78 | * bypass mptcp. | |
79 | */ | |
80 | sock->ops = &inet_stream_ops; | |
d2f77c53 | 81 | return true; |
0b4f33de FW |
82 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
83 | } else if (unlikely(sk->sk_prot == &tcpv6_prot)) { | |
84 | sock->ops = &inet6_stream_ops; | |
d2f77c53 | 85 | return true; |
0b4f33de FW |
86 | #endif |
87 | } | |
88 | ||
d2f77c53 | 89 | return false; |
0b4f33de FW |
90 | } |
91 | ||
76660afb | 92 | static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) |
cec37a6e | 93 | { |
cec37a6e PK |
94 | sock_owned_by_me((const struct sock *)msk); |
95 | ||
e1ff9e82 | 96 | if (likely(!__mptcp_check_fallback(msk))) |
cec37a6e PK |
97 | return NULL; |
98 | ||
76660afb | 99 | return msk->first; |
cec37a6e PK |
100 | } |
101 | ||
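/* Create the in-kernel TCP subflow used for the initial MP_CAPABLE
 * handshake and link it as the msk's first subflow.
 */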
fa68018d | 102 | static int __mptcp_socket_create(struct mptcp_sock *msk) |
2303f994 PK |
103 | { |
104 | struct mptcp_subflow_context *subflow; | |
105 | struct sock *sk = (struct sock *)msk; | |
106 | struct socket *ssock; | |
107 | int err; | |
108 | ||
2303f994 PK |
109 | err = mptcp_subflow_create_socket(sk, &ssock); |
110 | if (err) | |
fa68018d | 111 | return err; |
2303f994 | 112 | |
8ab183de | 113 | msk->first = ssock->sk; |
2303f994 PK |
114 | msk->subflow = ssock; |
115 | subflow = mptcp_subflow_ctx(ssock->sk); | |
cec37a6e | 116 | list_add(&subflow->node, &msk->conn_list); |
e16163b6 | 117 | sock_hold(ssock->sk); |
2303f994 | 118 | subflow->request_mptcp = 1; |
133f0169 | 119 | mptcp_sock_graft(msk->first, sk->sk_socket); |
e1ff9e82 | 120 | |
fa68018d | 121 | return 0; |
2303f994 PK |
122 | } |
123 | ||
ab174ad8 PA |
124 | static void mptcp_drop(struct sock *sk, struct sk_buff *skb) |
125 | { | |
126 | sk_drops_add(sk, skb); | |
127 | __kfree_skb(skb); | |
128 | } | |
129 | ||
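/* Merge @from into the tail skb @to of the msk receive queue: only skbs
 * carrying no mapping offset can be merged, and only when the underlying
 * skb_try_coalesce() succeeds; the merged memory is charged to the msk.
 */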
8268ed4c PA |
130 | static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, |
131 | struct sk_buff *from) | |
132 | { | |
133 | bool fragstolen; | |
134 | int delta; | |
135 | ||
136 | if (MPTCP_SKB_CB(from)->offset || | |
137 | !skb_try_coalesce(to, from, &fragstolen, &delta)) | |
138 | return false; | |
139 | ||
06242e44 PA |
140 | pr_debug("colesced seq %llx into %llx new len %d new end seq %llx", |
141 | MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, | |
142 | to->len, MPTCP_SKB_CB(from)->end_seq); | |
ab174ad8 | 143 | MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; |
8268ed4c PA |
144 | kfree_skb_partial(from, fragstolen); |
145 | atomic_add(delta, &sk->sk_rmem_alloc); | |
146 | sk_mem_charge(sk, delta); | |
147 | return true; | |
148 | } | |
149 | ||
ab174ad8 PA |
150 | static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, |
151 | struct sk_buff *from) | |
152 | { | |
153 | if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) | |
154 | return false; | |
155 | ||
156 | return mptcp_try_coalesce((struct sock *)msk, to, from); | |
157 | } | |
158 | ||
159 | /* "inspired" by tcp_data_queue_ofo(), main differences: | |
160 | * - use mptcp seqs | |
161 | * - don't cope with sacks | |
162 | */ | |
163 | static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) | |
164 | { | |
165 | struct sock *sk = (struct sock *)msk; | |
166 | struct rb_node **p, *parent; | |
167 | u64 seq, end_seq, max_seq; | |
168 | struct sk_buff *skb1; | |
169 | ||
170 | seq = MPTCP_SKB_CB(skb)->map_seq; | |
171 | end_seq = MPTCP_SKB_CB(skb)->end_seq; | |
fa3fe2b1 | 172 | max_seq = READ_ONCE(msk->rcv_wnd_sent); |
ab174ad8 | 173 | |
06242e44 PA |
174 | pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq, |
175 | RB_EMPTY_ROOT(&msk->out_of_order_queue)); | |
fa3fe2b1 | 176 | if (after64(end_seq, max_seq)) { |
ab174ad8 PA |
177 | /* out of window */ |
178 | mptcp_drop(sk, skb); | |
fa3fe2b1 FW |
179 | pr_debug("oow by %lld, rcv_wnd_sent %llu\n", |
180 | (unsigned long long)end_seq - (unsigned long long)max_seq, |
181 | (unsigned long long)msk->rcv_wnd_sent); | |
06242e44 | 182 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); |
ab174ad8 PA |
183 | return; |
184 | } | |
185 | ||
186 | p = &msk->out_of_order_queue.rb_node; | |
06242e44 | 187 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE); |
ab174ad8 PA |
188 | if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { |
189 | rb_link_node(&skb->rbnode, NULL, p); | |
190 | rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); | |
191 | msk->ooo_last_skb = skb; | |
192 | goto end; | |
193 | } | |
194 | ||
195 | /* with 2 subflows, adding at end of ooo queue is quite likely. |
196 | * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. | |
197 | */ | |
06242e44 PA |
198 | if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { |
199 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); | |
200 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); | |
ab174ad8 | 201 | return; |
06242e44 | 202 | } |
ab174ad8 PA |
203 | |
204 | /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ | |
205 | if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { | |
06242e44 | 206 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); |
ab174ad8 PA |
207 | parent = &msk->ooo_last_skb->rbnode; |
208 | p = &parent->rb_right; | |
209 | goto insert; | |
210 | } | |
211 | ||
212 | /* Find place to insert this segment. Handle overlaps on the way. */ | |
213 | parent = NULL; | |
214 | while (*p) { | |
215 | parent = *p; | |
216 | skb1 = rb_to_skb(parent); | |
217 | if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { | |
218 | p = &parent->rb_left; | |
219 | continue; | |
220 | } | |
221 | if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { | |
222 | if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { | |
223 | /* All the bits are present. Drop. */ | |
224 | mptcp_drop(sk, skb); | |
06242e44 | 225 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
226 | return; |
227 | } | |
228 | if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { | |
229 | /* partial overlap: | |
230 | * | skb | | |
231 | * | skb1 | | |
232 | * continue traversing | |
233 | */ | |
234 | } else { | |
235 | /* skb's seq == skb1's seq and skb covers skb1. | |
236 | * Replace skb1 with skb. | |
237 | */ | |
238 | rb_replace_node(&skb1->rbnode, &skb->rbnode, | |
239 | &msk->out_of_order_queue); | |
240 | mptcp_drop(sk, skb1); | |
06242e44 | 241 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
242 | goto merge_right; |
243 | } | |
244 | } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { | |
06242e44 | 245 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); |
ab174ad8 PA |
246 | return; |
247 | } | |
248 | p = &parent->rb_right; | |
249 | } | |
06242e44 | 250 | |
ab174ad8 PA |
251 | insert: |
252 | /* Insert segment into RB tree. */ | |
253 | rb_link_node(&skb->rbnode, parent, p); | |
254 | rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); | |
255 | ||
256 | merge_right: | |
257 | /* Remove other segments covered by skb. */ | |
258 | while ((skb1 = skb_rb_next(skb)) != NULL) { | |
259 | if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) | |
260 | break; | |
261 | rb_erase(&skb1->rbnode, &msk->out_of_order_queue); | |
262 | mptcp_drop(sk, skb1); | |
06242e44 | 263 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
264 | } |
265 | /* If there is no skb after us, we are the last_skb ! */ | |
266 | if (!skb1) | |
267 | msk->ooo_last_skb = skb; | |
268 | ||
269 | end: | |
270 | skb_condense(skb); | |
271 | skb_set_owner_r(skb, sk); | |
272 | } | |
273 | ||
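/* Move one skb from the subflow receive queue into the msk: in-sequence
 * data is appended (or coalesced) to the msk receive queue, data beyond
 * ack_seq goes to the out-of-order tree, stale data is dropped.
 */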
274 | static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, | |
275 | struct sk_buff *skb, unsigned int offset, | |
276 | size_t copy_len) | |
6771bfd9 | 277 | { |
ab174ad8 | 278 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
6771bfd9 | 279 | struct sock *sk = (struct sock *)msk; |
4e637c70 | 280 | struct sk_buff *tail; |
6771bfd9 FW |
281 | |
282 | __skb_unlink(skb, &ssk->sk_receive_queue); | |
6771bfd9 | 283 | |
4e637c70 FW |
284 | skb_ext_reset(skb); |
285 | skb_orphan(skb); | |
ab174ad8 | 286 | |
9c3f94e1 PA |
287 | /* try to fetch required memory from subflow */ |
288 | if (!sk_rmem_schedule(sk, skb, skb->truesize)) { | |
289 | if (ssk->sk_forward_alloc < skb->truesize) | |
290 | goto drop; | |
291 | __sk_mem_reclaim(ssk, skb->truesize); | |
292 | if (!sk_rmem_schedule(sk, skb, skb->truesize)) | |
293 | goto drop; | |
294 | } | |
295 | ||
ab174ad8 PA |
296 | /* the skb map_seq accounts for the skb offset: |
297 | * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq | |
298 | * value | |
299 | */ | |
300 | MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); | |
301 | MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; | |
8268ed4c | 302 | MPTCP_SKB_CB(skb)->offset = offset; |
4e637c70 | 303 | |
ab174ad8 PA |
304 | if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { |
305 | /* in sequence */ | |
8b0308fe | 306 | WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); |
ab174ad8 PA |
307 | tail = skb_peek_tail(&sk->sk_receive_queue); |
308 | if (tail && mptcp_try_coalesce(sk, tail, skb)) | |
309 | return true; | |
4e637c70 | 310 | |
ab174ad8 PA |
311 | skb_set_owner_r(skb, sk); |
312 | __skb_queue_tail(&sk->sk_receive_queue, skb); | |
313 | return true; | |
314 | } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { | |
315 | mptcp_data_queue_ofo(msk, skb); | |
316 | return false; | |
317 | } | |
318 | ||
319 | /* old data, keep it simple and drop the whole pkt, the sender |
320 | * will retransmit it if needed. |
321 | */ | |
06242e44 | 322 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
9c3f94e1 | 323 | drop: |
ab174ad8 PA |
324 | mptcp_drop(sk, skb); |
325 | return false; | |
6771bfd9 FW |
326 | } |
327 | ||
16a9a9da MM |
328 | static void mptcp_stop_timer(struct sock *sk) |
329 | { | |
330 | struct inet_connection_sock *icsk = inet_csk(sk); | |
331 | ||
332 | sk_stop_timer(sk, &icsk->icsk_retransmit_timer); | |
333 | mptcp_sk(sk)->timer_ival = 0; | |
334 | } | |
335 | ||
e16163b6 PA |
336 | static void mptcp_close_wake_up(struct sock *sk) |
337 | { | |
338 | if (sock_flag(sk, SOCK_DEAD)) | |
339 | return; | |
340 | ||
341 | sk->sk_state_change(sk); | |
342 | if (sk->sk_shutdown == SHUTDOWN_MASK || | |
343 | sk->sk_state == TCP_CLOSE) | |
344 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); | |
345 | else | |
346 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); | |
347 | } | |
348 | ||
6e628cd3 | 349 | static bool mptcp_pending_data_fin_ack(struct sock *sk) |
16a9a9da MM |
350 | { |
351 | struct mptcp_sock *msk = mptcp_sk(sk); | |
352 | ||
6e628cd3 PA |
353 | return !__mptcp_check_fallback(msk) && |
354 | ((1 << sk->sk_state) & | |
355 | (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && | |
356 | msk->write_seq == READ_ONCE(msk->snd_una); | |
357 | } | |
358 | ||
359 | static void mptcp_check_data_fin_ack(struct sock *sk) | |
360 | { | |
361 | struct mptcp_sock *msk = mptcp_sk(sk); | |
16a9a9da MM |
362 | |
363 | /* Look for an acknowledged DATA_FIN */ | |
6e628cd3 | 364 | if (mptcp_pending_data_fin_ack(sk)) { |
16a9a9da MM |
365 | WRITE_ONCE(msk->snd_data_fin_enable, 0); |
366 | ||
367 | switch (sk->sk_state) { | |
368 | case TCP_FIN_WAIT1: | |
369 | inet_sk_state_store(sk, TCP_FIN_WAIT2); | |
16a9a9da MM |
370 | break; |
371 | case TCP_CLOSING: | |
16a9a9da MM |
372 | case TCP_LAST_ACK: |
373 | inet_sk_state_store(sk, TCP_CLOSE); | |
16a9a9da MM |
374 | break; |
375 | } | |
376 | ||
e16163b6 | 377 | mptcp_close_wake_up(sk); |
16a9a9da MM |
378 | } |
379 | } | |
380 | ||
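/* A received DATA_FIN is actionable only once all data preceding its
 * sequence number has been consumed, i.e. when ack_seq has caught up
 * with rcv_data_fin_seq.
 */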
3721b9b6 MM |
381 | static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) |
382 | { | |
383 | struct mptcp_sock *msk = mptcp_sk(sk); | |
384 | ||
385 | if (READ_ONCE(msk->rcv_data_fin) && | |
386 | ((1 << sk->sk_state) & | |
387 | (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { | |
388 | u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); | |
389 | ||
390 | if (msk->ack_seq == rcv_data_fin_seq) { | |
391 | if (seq) | |
392 | *seq = rcv_data_fin_seq; | |
393 | ||
394 | return true; | |
395 | } | |
396 | } | |
397 | ||
398 | return false; | |
399 | } | |
400 | ||
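/* Derive the MPTCP-level retransmit interval from the subflow timer
 * still pending, keeping the previous value or TCP_RTO_MIN otherwise.
 */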
401 | static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk) | |
402 | { | |
403 | long tout = ssk && inet_csk(ssk)->icsk_pending ? | |
404 | inet_csk(ssk)->icsk_timeout - jiffies : 0; | |
405 | ||
406 | if (tout <= 0) | |
407 | tout = mptcp_sk(sk)->timer_ival; | |
408 | mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; | |
409 | } | |
410 | ||
ea4ca586 PA |
411 | static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) |
412 | { | |
413 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
414 | ||
415 | /* can't send if JOIN hasn't completed yet (i.e. subflow is not yet usable for mptcp) */ |
416 | if (subflow->request_join && !subflow->fully_established) | |
417 | return false; | |
418 | ||
419 | /* only send if our side has not closed yet */ | |
420 | return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)); | |
421 | } | |
422 | ||
fd897679 PA |
423 | static bool tcp_can_send_ack(const struct sock *ssk) |
424 | { | |
425 | return !((1 << inet_sk_state_load(ssk)) & | |
20bc80b6 | 426 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN)); |
fd897679 PA |
427 | } |
428 | ||
429 | static void mptcp_send_ack(struct mptcp_sock *msk) | |
7ed90803 PA |
430 | { |
431 | struct mptcp_subflow_context *subflow; | |
432 | ||
433 | mptcp_for_each_subflow(msk, subflow) { | |
434 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
435 | ||
fd897679 PA |
436 | lock_sock(ssk); |
437 | if (tcp_can_send_ack(ssk)) | |
ea4ca586 | 438 | tcp_send_ack(ssk); |
fd897679 | 439 | release_sock(ssk); |
ea4ca586 | 440 | } |
fd897679 PA |
441 | } |
442 | ||
443 | static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk) | |
444 | { | |
445 | int ret; | |
446 | ||
447 | lock_sock(ssk); | |
448 | ret = tcp_can_send_ack(ssk); | |
449 | if (ret) | |
450 | tcp_cleanup_rbuf(ssk, 1); | |
451 | release_sock(ssk); | |
452 | return ret; | |
453 | } | |
454 | ||
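/* Propagate receive buffer cleanup (and the resulting window update) to
 * a subflow: prefer the last subflow that fed the msk (ack_hint),
 * otherwise use the first subflow able to send an ACK.
 */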
455 | static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) | |
456 | { | |
87952603 | 457 | struct sock *ack_hint = READ_ONCE(msk->ack_hint); |
fd897679 PA |
458 | struct mptcp_subflow_context *subflow; |
459 | ||
460 | /* if the hinted ssk is still active, try to use it */ | |
87952603 | 461 | if (likely(ack_hint)) { |
fd897679 PA |
462 | mptcp_for_each_subflow(msk, subflow) { |
463 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
464 | ||
87952603 | 465 | if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk)) |
fd897679 PA |
466 | return; |
467 | } | |
7ed90803 | 468 | } |
fd897679 PA |
469 | |
470 | /* otherwise pick the first active subflow */ | |
471 | mptcp_for_each_subflow(msk, subflow) | |
472 | if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow))) | |
473 | return; | |
7ed90803 PA |
474 | } |
475 | ||
476 | static bool mptcp_check_data_fin(struct sock *sk) | |
3721b9b6 MM |
477 | { |
478 | struct mptcp_sock *msk = mptcp_sk(sk); | |
479 | u64 rcv_data_fin_seq; | |
7ed90803 | 480 | bool ret = false; |
3721b9b6 MM |
481 | |
482 | if (__mptcp_check_fallback(msk) || !msk->first) | |
7ed90803 | 483 | return ret; |
3721b9b6 MM |
484 | |
485 | /* Need to ack a DATA_FIN received from a peer while this side | |
486 | * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. | |
487 | * msk->rcv_data_fin was set when parsing the incoming options | |
488 | * at the subflow level and the msk lock was not held, so this | |
489 | * is the first opportunity to act on the DATA_FIN and change | |
490 | * the msk state. | |
491 | * | |
492 | * If we are caught up to the sequence number of the incoming | |
493 | * DATA_FIN, send the DATA_ACK now and do state transition. If | |
494 | * not caught up, do nothing and let the recv code send DATA_ACK | |
495 | * when catching up. | |
496 | */ | |
497 | ||
498 | if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { | |
917944da | 499 | WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); |
3721b9b6 MM |
500 | WRITE_ONCE(msk->rcv_data_fin, 0); |
501 | ||
502 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
16a9a9da MM |
503 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ |
504 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
3721b9b6 MM |
505 | |
506 | switch (sk->sk_state) { | |
507 | case TCP_ESTABLISHED: | |
508 | inet_sk_state_store(sk, TCP_CLOSE_WAIT); | |
509 | break; | |
510 | case TCP_FIN_WAIT1: | |
511 | inet_sk_state_store(sk, TCP_CLOSING); | |
512 | break; | |
513 | case TCP_FIN_WAIT2: | |
514 | inet_sk_state_store(sk, TCP_CLOSE); | |
3721b9b6 MM |
515 | break; |
516 | default: | |
517 | /* Other states not expected */ | |
518 | WARN_ON_ONCE(1); | |
519 | break; | |
520 | } | |
521 | ||
7ed90803 | 522 | ret = true; |
3721b9b6 | 523 | mptcp_set_timeout(sk, NULL); |
fd897679 | 524 | mptcp_send_ack(msk); |
e16163b6 | 525 | mptcp_close_wake_up(sk); |
3721b9b6 | 526 | } |
7ed90803 | 527 | return ret; |
3721b9b6 MM |
528 | } |
529 | ||
6771bfd9 FW |
530 | static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, |
531 | struct sock *ssk, | |
532 | unsigned int *bytes) | |
533 | { | |
534 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
600911ff | 535 | struct sock *sk = (struct sock *)msk; |
6771bfd9 FW |
536 | unsigned int moved = 0; |
537 | bool more_data_avail; | |
538 | struct tcp_sock *tp; | |
539 | bool done = false; | |
13c7ba0c FW |
540 | int sk_rbuf; |
541 | ||
542 | sk_rbuf = READ_ONCE(sk->sk_rcvbuf); | |
543 | ||
544 | if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { | |
545 | int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); | |
546 | ||
547 | if (unlikely(ssk_rbuf > sk_rbuf)) { | |
548 | WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); | |
549 | sk_rbuf = ssk_rbuf; | |
550 | } | |
551 | } | |
600911ff | 552 | |
ab174ad8 | 553 | pr_debug("msk=%p ssk=%p", msk, ssk); |
6771bfd9 FW |
554 | tp = tcp_sk(ssk); |
555 | do { | |
556 | u32 map_remaining, offset; | |
557 | u32 seq = tp->copied_seq; | |
558 | struct sk_buff *skb; | |
559 | bool fin; | |
560 | ||
561 | /* try to move as much data as available */ | |
562 | map_remaining = subflow->map_data_len - | |
563 | mptcp_subflow_get_map_offset(subflow); | |
564 | ||
565 | skb = skb_peek(&ssk->sk_receive_queue); | |
d9fb8c50 PA |
566 | if (!skb) { |
567 | /* if no data is found, a racing workqueue/recvmsg | |
568 | * already processed the new data, stop here or we | |
569 | * can enter an infinite loop | |
570 | */ | |
571 | if (!moved) | |
572 | done = true; | |
6771bfd9 | 573 | break; |
d9fb8c50 | 574 | } |
6771bfd9 | 575 | |
e1ff9e82 DC |
576 | if (__mptcp_check_fallback(msk)) { |
577 | /* if we are running under the workqueue, TCP could have | |
578 | * collapsed skbs between dummy map creation and now; |
579 | * be sure to adjust the size |
580 | */ | |
581 | map_remaining = skb->len; | |
582 | subflow->map_data_len = skb->len; | |
583 | } | |
584 | ||
6771bfd9 FW |
585 | offset = seq - TCP_SKB_CB(skb)->seq; |
586 | fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; | |
587 | if (fin) { | |
588 | done = true; | |
589 | seq++; | |
590 | } | |
591 | ||
592 | if (offset < skb->len) { | |
593 | size_t len = skb->len - offset; | |
594 | ||
595 | if (tp->urg_data) | |
596 | done = true; | |
597 | ||
ab174ad8 PA |
598 | if (__mptcp_move_skb(msk, ssk, skb, offset, len)) |
599 | moved += len; | |
6771bfd9 | 600 | seq += len; |
6771bfd9 FW |
601 | |
602 | if (WARN_ON_ONCE(map_remaining < len)) | |
603 | break; | |
604 | } else { | |
605 | WARN_ON_ONCE(!fin); | |
606 | sk_eat_skb(ssk, skb); | |
607 | done = true; | |
608 | } | |
609 | ||
610 | WRITE_ONCE(tp->copied_seq, seq); | |
611 | more_data_avail = mptcp_subflow_data_available(ssk); | |
600911ff | 612 | |
13c7ba0c | 613 | if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { |
600911ff FW |
614 | done = true; |
615 | break; | |
616 | } | |
6771bfd9 | 617 | } while (more_data_avail); |
87952603 | 618 | WRITE_ONCE(msk->ack_hint, ssk); |
6771bfd9 | 619 | |
6719331c | 620 | *bytes += moved; |
6771bfd9 FW |
621 | return done; |
622 | } | |
623 | ||
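/* Drain the out-of-order tree into the msk receive queue: stop at the
 * first skb beyond ack_seq, drop duplicates and trim any partial
 * overlap with already received data.
 */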
87952603 | 624 | static bool __mptcp_ofo_queue(struct mptcp_sock *msk) |
ab174ad8 PA |
625 | { |
626 | struct sock *sk = (struct sock *)msk; | |
627 | struct sk_buff *skb, *tail; | |
628 | bool moved = false; | |
629 | struct rb_node *p; | |
630 | u64 end_seq; | |
631 | ||
632 | p = rb_first(&msk->out_of_order_queue); | |
06242e44 | 633 | pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); |
ab174ad8 PA |
634 | while (p) { |
635 | skb = rb_to_skb(p); | |
636 | if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) | |
637 | break; | |
638 | ||
639 | p = rb_next(p); | |
640 | rb_erase(&skb->rbnode, &msk->out_of_order_queue); | |
641 | ||
642 | if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, | |
643 | msk->ack_seq))) { | |
644 | mptcp_drop(sk, skb); | |
06242e44 | 645 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); |
ab174ad8 PA |
646 | continue; |
647 | } | |
648 | ||
649 | end_seq = MPTCP_SKB_CB(skb)->end_seq; | |
650 | tail = skb_peek_tail(&sk->sk_receive_queue); | |
651 | if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { | |
652 | int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; | |
653 | ||
654 | /* skip overlapping data, if any */ | |
06242e44 PA |
655 | pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d", |
656 | MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, | |
657 | delta); | |
ab174ad8 PA |
658 | MPTCP_SKB_CB(skb)->offset += delta; |
659 | __skb_queue_tail(&sk->sk_receive_queue, skb); | |
660 | } | |
661 | msk->ack_seq = end_seq; | |
662 | moved = true; | |
663 | } | |
664 | return moved; | |
665 | } | |
666 | ||
2e52213c FW |
667 | /* In most cases we will be able to lock the mptcp socket. If it's already
668 | * owned, we need to defer to the work queue to avoid ABBA deadlock. | |
669 | */ | |
87952603 | 670 | static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) |
2e52213c FW |
671 | { |
672 | struct sock *sk = (struct sock *)msk; | |
673 | unsigned int moved = 0; | |
674 | ||
87952603 PA |
675 | if (inet_sk_state_load(sk) == TCP_CLOSE) |
676 | return; | |
ab174ad8 | 677 | |
87952603 | 678 | mptcp_data_lock(sk); |
2e52213c | 679 | |
87952603 PA |
680 | __mptcp_move_skbs_from_subflow(msk, ssk, &moved); |
681 | __mptcp_ofo_queue(msk); | |
2e52213c | 682 | |
87952603 PA |
683 | /* If the moves have caught up with the DATA_FIN sequence number |
684 | * it's time to ack the DATA_FIN and change socket state, but | |
685 | * this is not a good place to change state. Let the workqueue | |
686 | * do it. | |
687 | */ | |
688 | if (mptcp_pending_data_fin(sk, NULL)) | |
689 | mptcp_schedule_work(sk); | |
690 | mptcp_data_unlock(sk); | |
2e52213c FW |
691 | } |
692 | ||
693 | void mptcp_data_ready(struct sock *sk, struct sock *ssk) | |
101f6f85 | 694 | { |
6719331c | 695 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
101f6f85 | 696 | struct mptcp_sock *msk = mptcp_sk(sk); |
13c7ba0c | 697 | int sk_rbuf, ssk_rbuf; |
6719331c | 698 | bool wake; |
101f6f85 | 699 | |
d7b1bfd0 PA |
700 | /* The peer can send data while we are shutting down this |
701 | * subflow at msk destruction time, but we must avoid enqueuing | |
702 | * more data to the msk receive queue | |
703 | */ | |
704 | if (unlikely(subflow->disposable)) | |
705 | return; | |
706 | ||
6719331c PA |
707 | /* move_skbs_to_msk below can legitimately clear the data_avail flag, |
708 | * but we will later need to properly wake the reader; cache its |
709 | * value | |
710 | */ | |
711 | wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL; | |
712 | if (wake) | |
713 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
6771bfd9 | 714 | |
13c7ba0c FW |
715 | ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); |
716 | sk_rbuf = READ_ONCE(sk->sk_rcvbuf); | |
717 | if (unlikely(ssk_rbuf > sk_rbuf)) | |
718 | sk_rbuf = ssk_rbuf; | |
719 | ||
720 | /* over limit? can't append more skbs to msk */ | |
721 | if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) | |
2e52213c FW |
722 | goto wake; |
723 | ||
ea4ca586 | 724 | move_skbs_to_msk(msk, ssk); |
600911ff | 725 | |
600911ff | 726 | wake: |
6719331c PA |
727 | if (wake) |
728 | sk->sk_data_ready(sk); | |
101f6f85 FW |
729 | } |
730 | ||
84dfe367 | 731 | void __mptcp_flush_join_list(struct mptcp_sock *msk) |
ec3edaa7 PK |
732 | { |
733 | if (likely(list_empty(&msk->join_list))) | |
734 | return; | |
735 | ||
736 | spin_lock_bh(&msk->join_list_lock); | |
737 | list_splice_tail_init(&msk->join_list, &msk->conn_list); | |
738 | spin_unlock_bh(&msk->join_list_lock); | |
739 | } | |
740 | ||
b51f9b80 PA |
741 | static bool mptcp_timer_pending(struct sock *sk) |
742 | { | |
743 | return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); | |
744 | } | |
745 | ||
746 | static void mptcp_reset_timer(struct sock *sk) | |
747 | { | |
748 | struct inet_connection_sock *icsk = inet_csk(sk); | |
749 | unsigned long tout; | |
750 | ||
e16163b6 PA |
751 | /* prevent rescheduling on close */ |
752 | if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) | |
753 | return; | |
754 | ||
b51f9b80 PA |
755 | /* should never be called with mptcp level timer cleared */ |
756 | tout = READ_ONCE(mptcp_sk(sk)->timer_ival); | |
757 | if (WARN_ON_ONCE(!tout)) | |
758 | tout = TCP_RTO_MIN; | |
759 | sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); | |
760 | } | |
761 | ||
ba8f48f7 PA |
762 | bool mptcp_schedule_work(struct sock *sk) |
763 | { | |
764 | if (inet_sk_state_load(sk) != TCP_CLOSE && | |
765 | schedule_work(&mptcp_sk(sk)->work)) { | |
766 | /* each subflow already holds a reference to the sk, and the | |
767 | * workqueue is invoked by a subflow, so sk can't go away here. | |
768 | */ | |
769 | sock_hold(sk); | |
770 | return true; | |
771 | } | |
772 | return false; | |
773 | } | |
774 | ||
59832e24 FW |
775 | void mptcp_subflow_eof(struct sock *sk) |
776 | { | |
ba8f48f7 PA |
777 | if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags)) |
778 | mptcp_schedule_work(sk); | |
59832e24 FW |
779 | } |
780 | ||
5969856a PA |
781 | static void mptcp_check_for_eof(struct mptcp_sock *msk) |
782 | { | |
783 | struct mptcp_subflow_context *subflow; | |
784 | struct sock *sk = (struct sock *)msk; | |
785 | int receivers = 0; | |
786 | ||
787 | mptcp_for_each_subflow(msk, subflow) | |
788 | receivers += !subflow->rx_eof; | |
e16163b6 PA |
789 | if (receivers) |
790 | return; | |
5969856a | 791 | |
e16163b6 | 792 | if (!(sk->sk_shutdown & RCV_SHUTDOWN)) { |
5969856a PA |
793 | /* hopefully temporary hack: propagate shutdown status |
794 | * to msk, when all subflows agree on it | |
795 | */ | |
796 | sk->sk_shutdown |= RCV_SHUTDOWN; | |
797 | ||
798 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ | |
799 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
800 | sk->sk_data_ready(sk); | |
801 | } | |
e16163b6 PA |
802 | |
803 | switch (sk->sk_state) { | |
804 | case TCP_ESTABLISHED: | |
805 | inet_sk_state_store(sk, TCP_CLOSE_WAIT); | |
806 | break; | |
807 | case TCP_FIN_WAIT1: | |
26aa2314 PA |
808 | inet_sk_state_store(sk, TCP_CLOSING); |
809 | break; | |
810 | case TCP_FIN_WAIT2: | |
e16163b6 PA |
811 | inet_sk_state_store(sk, TCP_CLOSE); |
812 | break; | |
813 | default: | |
814 | return; | |
815 | } | |
816 | mptcp_close_wake_up(sk); | |
5969856a PA |
817 | } |
818 | ||
7a6a6cbc PA |
819 | static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) |
820 | { | |
821 | struct mptcp_subflow_context *subflow; | |
822 | struct sock *sk = (struct sock *)msk; | |
823 | ||
824 | sock_owned_by_me(sk); | |
825 | ||
826 | mptcp_for_each_subflow(msk, subflow) { | |
827 | if (subflow->data_avail) | |
828 | return mptcp_subflow_tcp_sock(subflow); | |
829 | } | |
830 | ||
831 | return NULL; | |
832 | } | |
833 | ||
3f8e0aae PA |
834 | static bool mptcp_skb_can_collapse_to(u64 write_seq, |
835 | const struct sk_buff *skb, | |
836 | const struct mptcp_ext *mpext) | |
57040755 PA |
837 | { |
838 | if (!tcp_skb_can_collapse_to(skb)) | |
839 | return false; | |
840 | ||
5a369ca6 PA |
841 | /* can collapse only if MPTCP level sequence is in order and this |
842 | * mapping has not been xmitted yet | |
843 | */ | |
844 | return mpext && mpext->data_seq + mpext->data_len == write_seq && | |
845 | !mpext->frozen; | |
57040755 PA |
846 | } |
847 | ||
18b683bf PA |
848 | static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, |
849 | const struct page_frag *pfrag, | |
850 | const struct mptcp_data_frag *df) | |
851 | { | |
852 | return df && pfrag->page == df->page && | |
d9ca1de8 | 853 | pfrag->size - pfrag->offset > 0 && |
18b683bf PA |
854 | df->data_seq + df->data_len == msk->write_seq; |
855 | } | |
856 | ||
724cfd2e | 857 | static int mptcp_wmem_with_overhead(struct sock *sk, int size) |
e93da928 | 858 | { |
724cfd2e PA |
859 | struct mptcp_sock *msk = mptcp_sk(sk); |
860 | int ret, skbs; | |
861 | ||
862 | ret = size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT); | |
863 | skbs = (msk->tx_pending_data + size) / msk->size_goal_cache; | |
864 | if (skbs < msk->skb_tx_cache.qlen) | |
865 | return ret; | |
866 | ||
867 | return ret + (skbs - msk->skb_tx_cache.qlen) * SKB_TRUESIZE(MAX_TCP_HEADER); | |
e93da928 PA |
868 | } |
869 | ||
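/* Pre-reserve, while acquiring the msk socket lock, the forward
 * allocated memory the following sendmsg() is expected to need, so that
 * the xmit path can charge it against wmem_reserved without taking the
 * data lock; a negative wmem_reserved flags an allocation error.
 */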
870 | static void __mptcp_wmem_reserve(struct sock *sk, int size) | |
871 | { | |
724cfd2e | 872 | int amount = mptcp_wmem_with_overhead(sk, size); |
e93da928 PA |
873 | struct mptcp_sock *msk = mptcp_sk(sk); |
874 | ||
875 | WARN_ON_ONCE(msk->wmem_reserved); | |
e7579d5d DC |
876 | if (WARN_ON_ONCE(amount < 0)) |
877 | amount = 0; | |
878 | ||
e93da928 PA |
879 | if (amount <= sk->sk_forward_alloc) |
880 | goto reserve; | |
881 | ||
882 | /* under memory pressure try to reserve at most a single page | |
883 | * otherwise try to reserve the full estimate and fallback | |
884 | * to a single page before entering the error path | |
885 | */ | |
886 | if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) || | |
887 | !sk_wmem_schedule(sk, amount)) { | |
888 | if (amount <= PAGE_SIZE) | |
889 | goto nomem; | |
890 | ||
891 | amount = PAGE_SIZE; | |
892 | if (!sk_wmem_schedule(sk, amount)) | |
893 | goto nomem; | |
894 | } | |
895 | ||
896 | reserve: | |
897 | msk->wmem_reserved = amount; | |
898 | sk->sk_forward_alloc -= amount; | |
899 | return; | |
900 | ||
901 | nomem: | |
902 | /* we will wait for memory on next allocation */ | |
903 | msk->wmem_reserved = -1; | |
904 | } | |
905 | ||
906 | static void __mptcp_update_wmem(struct sock *sk) | |
907 | { | |
908 | struct mptcp_sock *msk = mptcp_sk(sk); | |
909 | ||
910 | if (!msk->wmem_reserved) | |
911 | return; | |
912 | ||
913 | if (msk->wmem_reserved < 0) | |
914 | msk->wmem_reserved = 0; | |
915 | if (msk->wmem_reserved > 0) { | |
916 | sk->sk_forward_alloc += msk->wmem_reserved; | |
917 | msk->wmem_reserved = 0; | |
918 | } | |
919 | } | |
920 | ||
921 | static bool mptcp_wmem_alloc(struct sock *sk, int size) | |
922 | { | |
923 | struct mptcp_sock *msk = mptcp_sk(sk); | |
924 | ||
925 | /* check for pre-existing error condition */ | |
926 | if (msk->wmem_reserved < 0) | |
927 | return false; | |
928 | ||
929 | if (msk->wmem_reserved >= size) | |
930 | goto account; | |
931 | ||
87952603 PA |
932 | mptcp_data_lock(sk); |
933 | if (!sk_wmem_schedule(sk, size)) { | |
934 | mptcp_data_unlock(sk); | |
e93da928 | 935 | return false; |
87952603 | 936 | } |
e93da928 PA |
937 | |
938 | sk->sk_forward_alloc -= size; | |
939 | msk->wmem_reserved += size; | |
87952603 | 940 | mptcp_data_unlock(sk); |
e93da928 PA |
941 | |
942 | account: | |
943 | msk->wmem_reserved -= size; | |
944 | return true; | |
945 | } | |
946 | ||
87952603 PA |
947 | static void mptcp_wmem_uncharge(struct sock *sk, int size) |
948 | { | |
949 | struct mptcp_sock *msk = mptcp_sk(sk); | |
950 | ||
951 | if (msk->wmem_reserved < 0) | |
952 | msk->wmem_reserved = 0; | |
953 | msk->wmem_reserved += size; | |
954 | } | |
955 | ||
724cfd2e PA |
956 | static void mptcp_mem_reclaim_partial(struct sock *sk) |
957 | { | |
958 | struct mptcp_sock *msk = mptcp_sk(sk); | |
959 | ||
960 | /* if we are experiencing a transient allocation error, |
961 | * the forward allocation memory has already been |
962 | * released | |
963 | */ | |
964 | if (msk->wmem_reserved < 0) | |
965 | return; | |
966 | ||
967 | mptcp_data_lock(sk); | |
968 | sk->sk_forward_alloc += msk->wmem_reserved; | |
969 | sk_mem_reclaim_partial(sk); | |
970 | msk->wmem_reserved = sk->sk_forward_alloc; | |
971 | sk->sk_forward_alloc = 0; | |
972 | mptcp_data_unlock(sk); | |
973 | } | |
974 | ||
d027236c PA |
975 | static void dfrag_uncharge(struct sock *sk, int len) |
976 | { | |
977 | sk_mem_uncharge(sk, len); | |
7948f6cc | 978 | sk_wmem_queued_add(sk, -len); |
d027236c PA |
979 | } |
980 | ||
981 | static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) | |
18b683bf | 982 | { |
d027236c PA |
983 | int len = dfrag->data_len + dfrag->overhead; |
984 | ||
18b683bf | 985 | list_del(&dfrag->list); |
d027236c | 986 | dfrag_uncharge(sk, len); |
18b683bf PA |
987 | put_page(dfrag->page); |
988 | } | |
989 | ||
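/* Walk the rtx queue dropping the dfrags fully covered by snd_una, trim
 * the partially acked head, release the associated fwd memory and stop
 * or re-arm the retransmit timer as needed.
 */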
6e628cd3 | 990 | static void __mptcp_clean_una(struct sock *sk) |
18b683bf PA |
991 | { |
992 | struct mptcp_sock *msk = mptcp_sk(sk); | |
993 | struct mptcp_data_frag *dtmp, *dfrag; | |
d027236c | 994 | bool cleaned = false; |
e1ff9e82 DC |
995 | u64 snd_una; |
996 | ||
997 | /* on fallback we just need to ignore snd_una, as this is really | |
998 | * plain TCP | |
999 | */ | |
1000 | if (__mptcp_check_fallback(msk)) | |
7439d687 | 1001 | msk->snd_una = READ_ONCE(msk->snd_nxt); |
6f8a612a | 1002 | |
7439d687 | 1003 | snd_una = msk->snd_una; |
18b683bf PA |
1004 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { |
1005 | if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) | |
1006 | break; | |
1007 | ||
d9ca1de8 PA |
1008 | if (WARN_ON_ONCE(dfrag == msk->first_pending)) |
1009 | break; | |
d027236c PA |
1010 | dfrag_clear(sk, dfrag); |
1011 | cleaned = true; | |
1012 | } | |
1013 | ||
7948f6cc FW |
1014 | dfrag = mptcp_rtx_head(sk); |
1015 | if (dfrag && after64(snd_una, dfrag->data_seq)) { | |
53eb4c38 PA |
1016 | u64 delta = snd_una - dfrag->data_seq; |
1017 | ||
d9ca1de8 | 1018 | if (WARN_ON_ONCE(delta > dfrag->already_sent)) |
53eb4c38 | 1019 | goto out; |
7948f6cc FW |
1020 | |
1021 | dfrag->data_seq += delta; | |
53eb4c38 | 1022 | dfrag->offset += delta; |
7948f6cc | 1023 | dfrag->data_len -= delta; |
d9ca1de8 | 1024 | dfrag->already_sent -= delta; |
7948f6cc FW |
1025 | |
1026 | dfrag_uncharge(sk, delta); | |
1027 | cleaned = true; | |
1028 | } | |
1029 | ||
53eb4c38 | 1030 | out: |
6e628cd3 PA |
1031 | if (cleaned) { |
1032 | if (tcp_under_memory_pressure(sk)) { | |
1033 | __mptcp_update_wmem(sk); | |
1034 | sk_mem_reclaim_partial(sk); | |
1035 | } | |
63561a40 | 1036 | |
6e628cd3 PA |
1037 | if (sk_stream_is_writeable(sk)) { |
1038 | /* pairs with memory barrier in mptcp_poll */ | |
1039 | smp_mb(); | |
1040 | if (test_and_clear_bit(MPTCP_NOSPACE, &msk->flags)) | |
1041 | sk_stream_write_space(sk); | |
1042 | } | |
1043 | } | |
95ed690e | 1044 | |
6e628cd3 PA |
1045 | if (snd_una == READ_ONCE(msk->snd_nxt)) { |
1046 | if (msk->timer_ival) | |
1047 | mptcp_stop_timer(sk); | |
1048 | } else { | |
1049 | mptcp_reset_timer(sk); | |
18b683bf PA |
1050 | } |
1051 | } | |
1052 | ||
724cfd2e | 1053 | static void mptcp_enter_memory_pressure(struct sock *sk) |
18b683bf | 1054 | { |
d9ca1de8 PA |
1055 | struct mptcp_subflow_context *subflow; |
1056 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1057 | bool first = true; | |
1058 | ||
18b683bf | 1059 | sk_stream_moderate_sndbuf(sk); |
d9ca1de8 PA |
1060 | mptcp_for_each_subflow(msk, subflow) { |
1061 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
1062 | ||
1063 | if (first) | |
1064 | tcp_enter_memory_pressure(ssk); | |
1065 | sk_stream_moderate_sndbuf(ssk); | |
1066 | first = false; | |
1067 | } | |
724cfd2e PA |
1068 | } |
1069 | ||
1070 | /* ensure we get enough memory for the frag hdr, beyond some minimal amount of | |
1071 | * data | |
1072 | */ | |
1073 | static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) | |
1074 | { | |
1075 | if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), | |
1076 | pfrag, sk->sk_allocation))) | |
1077 | return true; | |
1078 | ||
1079 | mptcp_enter_memory_pressure(sk); | |
18b683bf PA |
1080 | return false; |
1081 | } | |
1082 | ||
1083 | static struct mptcp_data_frag * | |
1084 | mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, | |
1085 | int orig_offset) | |
1086 | { | |
1087 | int offset = ALIGN(orig_offset, sizeof(long)); | |
1088 | struct mptcp_data_frag *dfrag; | |
1089 | ||
1090 | dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); | |
1091 | dfrag->data_len = 0; | |
1092 | dfrag->data_seq = msk->write_seq; | |
1093 | dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); | |
1094 | dfrag->offset = offset + sizeof(struct mptcp_data_frag); | |
d9ca1de8 | 1095 | dfrag->already_sent = 0; |
18b683bf PA |
1096 | dfrag->page = pfrag->page; |
1097 | ||
1098 | return dfrag; | |
1099 | } | |
1100 | ||
caf971df PA |
1101 | struct mptcp_sendmsg_info { |
1102 | int mss_now; | |
1103 | int size_goal; | |
d9ca1de8 PA |
1104 | u16 limit; |
1105 | u16 sent; | |
1106 | unsigned int flags; | |
caf971df PA |
1107 | }; |
1108 | ||
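/* Clamp the xmit size so the mapping never exceeds the receive window
 * announced by the peer (mptcp_wnd_end); fallback sockets are not
 * subject to MPTCP-level window checks.
 */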
6f8a612a FW |
1109 | static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq, |
1110 | int avail_size) | |
1111 | { | |
1112 | u64 window_end = mptcp_wnd_end(msk); | |
1113 | ||
1114 | if (__mptcp_check_fallback(msk)) | |
1115 | return avail_size; | |
1116 | ||
1117 | if (!before64(data_seq + avail_size, window_end)) { | |
1118 | u64 allowed_size = window_end - data_seq; | |
1119 | ||
1120 | return min_t(unsigned int, allowed_size, avail_size); | |
1121 | } | |
1122 | ||
1123 | return avail_size; | |
1124 | } | |
1125 | ||
724cfd2e PA |
1126 | static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp) |
1127 | { | |
1128 | struct skb_ext *mpext = __skb_ext_alloc(gfp); | |
1129 | ||
1130 | if (!mpext) | |
1131 | return false; | |
1132 | __skb_ext_set(skb, SKB_EXT_MPTCP, mpext); | |
1133 | return true; | |
1134 | } | |
1135 | ||
6e628cd3 | 1136 | static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) |
724cfd2e PA |
1137 | { |
1138 | struct sk_buff *skb; | |
1139 | ||
6e628cd3 | 1140 | skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); |
724cfd2e | 1141 | if (likely(skb)) { |
6e628cd3 | 1142 | if (likely(__mptcp_add_ext(skb, gfp))) { |
724cfd2e PA |
1143 | skb_reserve(skb, MAX_TCP_HEADER); |
1144 | skb->reserved_tailroom = skb->end - skb->tail; | |
1145 | return skb; | |
1146 | } | |
1147 | __kfree_skb(skb); | |
1148 | } else { | |
1149 | mptcp_enter_memory_pressure(sk); | |
1150 | } | |
1151 | return NULL; | |
1152 | } | |
1153 | ||
1154 | static bool mptcp_tx_cache_refill(struct sock *sk, int size, | |
1155 | struct sk_buff_head *skbs, int *total_ts) | |
1156 | { | |
1157 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1158 | struct sk_buff *skb; | |
1159 | int space_needed; | |
1160 | ||
1161 | if (unlikely(tcp_under_memory_pressure(sk))) { | |
1162 | mptcp_mem_reclaim_partial(sk); | |
1163 | ||
1164 | /* under pressure pre-allocate at most a single skb */ | |
1165 | if (msk->skb_tx_cache.qlen) | |
1166 | return true; | |
1167 | space_needed = msk->size_goal_cache; | |
1168 | } else { | |
1169 | space_needed = msk->tx_pending_data + size - | |
1170 | msk->skb_tx_cache.qlen * msk->size_goal_cache; | |
1171 | } | |
1172 | ||
1173 | while (space_needed > 0) { | |
6e628cd3 | 1174 | skb = __mptcp_do_alloc_tx_skb(sk, sk->sk_allocation); |
724cfd2e PA |
1175 | if (unlikely(!skb)) { |
1176 | /* under memory pressure, try to pass the caller a | |
1177 | * single skb to allow forward progress | |
1178 | */ | |
1179 | while (skbs->qlen > 1) { | |
1180 | skb = __skb_dequeue_tail(skbs); | |
fecf66c4 | 1181 | *total_ts -= skb->truesize; |
724cfd2e PA |
1182 | __kfree_skb(skb); |
1183 | } | |
1184 | return skbs->qlen > 0; | |
1185 | } | |
1186 | ||
1187 | *total_ts += skb->truesize; | |
1188 | __skb_queue_tail(skbs, skb); | |
1189 | space_needed -= msk->size_goal_cache; | |
1190 | } | |
1191 | return true; | |
1192 | } | |
1193 | ||
6e628cd3 | 1194 | static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) |
724cfd2e PA |
1195 | { |
1196 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1197 | struct sk_buff *skb; | |
1198 | ||
1199 | if (ssk->sk_tx_skb_cache) { | |
1200 | skb = ssk->sk_tx_skb_cache; | |
1201 | if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) && | |
6e628cd3 | 1202 | !__mptcp_add_ext(skb, gfp))) |
724cfd2e PA |
1203 | return false; |
1204 | return true; | |
1205 | } | |
1206 | ||
1207 | skb = skb_peek(&msk->skb_tx_cache); | |
1208 | if (skb) { | |
1209 | if (likely(sk_wmem_schedule(ssk, skb->truesize))) { | |
1210 | skb = __skb_dequeue(&msk->skb_tx_cache); | |
1211 | if (WARN_ON_ONCE(!skb)) | |
1212 | return false; | |
1213 | ||
1214 | mptcp_wmem_uncharge(sk, skb->truesize); | |
1215 | ssk->sk_tx_skb_cache = skb; | |
1216 | return true; | |
1217 | } | |
1218 | ||
1219 | /* over memory limit, no point to try to allocate a new skb */ | |
1220 | return false; | |
1221 | } | |
1222 | ||
6e628cd3 | 1223 | skb = __mptcp_do_alloc_tx_skb(sk, gfp); |
724cfd2e PA |
1224 | if (!skb) |
1225 | return false; | |
1226 | ||
1227 | if (likely(sk_wmem_schedule(ssk, skb->truesize))) { | |
1228 | ssk->sk_tx_skb_cache = skb; | |
1229 | return true; | |
1230 | } | |
1231 | kfree_skb(skb); | |
1232 | return false; | |
1233 | } | |
1234 | ||
1235 | static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk) | |
1236 | { | |
1237 | return !ssk->sk_tx_skb_cache && | |
1238 | !skb_peek(&mptcp_sk(sk)->skb_tx_cache) && | |
1239 | tcp_under_memory_pressure(sk); | |
1240 | } | |
1241 | ||
1242 | static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk) | |
1243 | { | |
1244 | if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) | |
1245 | mptcp_mem_reclaim_partial(sk); | |
6e628cd3 | 1246 | return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation); |
724cfd2e PA |
1247 | } |
1248 | ||
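/* Push (part of) a dfrag onto the given subflow: collapse into the tail
 * skb when the DSS mapping allows it, otherwise build a new mapping, and
 * emit a 1-byte zero-window probe when the peer window is closed.
 */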
6d0060f6 | 1249 | static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, |
d9ca1de8 | 1250 | struct mptcp_data_frag *dfrag, |
caf971df | 1251 | struct mptcp_sendmsg_info *info) |
6d0060f6 | 1252 | { |
d9ca1de8 | 1253 | u64 data_seq = dfrag->data_seq + info->sent; |
6d0060f6 | 1254 | struct mptcp_sock *msk = mptcp_sk(sk); |
6f8a612a | 1255 | bool zero_window_probe = false; |
6d0060f6 | 1256 | struct mptcp_ext *mpext = NULL; |
57040755 | 1257 | struct sk_buff *skb, *tail; |
d9ca1de8 | 1258 | bool can_collapse = false; |
15e6ca97 | 1259 | int size_bias = 0; |
d9ca1de8 | 1260 | int avail_size; |
724cfd2e | 1261 | size_t ret = 0; |
6d0060f6 | 1262 | |
d9ca1de8 PA |
1263 | pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d", |
1264 | msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); | |
1265 | ||
1266 | /* compute send limit */ | |
1267 | info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); | |
caf971df | 1268 | avail_size = info->size_goal; |
724cfd2e | 1269 | msk->size_goal_cache = info->size_goal; |
57040755 PA |
1270 | skb = tcp_write_queue_tail(ssk); |
1271 | if (skb) { | |
57040755 PA |
1272 | /* Limit the write to the size available in the |
1273 | * current skb, if any, so that we create at most a new skb. | |
1274 | * Explicitly tells TCP internals to avoid collapsing on later | |
1275 | * queue management operation, to avoid breaking the ext <-> | |
1276 | * SSN association set here | |
1277 | */ | |
d9ca1de8 | 1278 | mpext = skb_ext_find(skb, SKB_EXT_MPTCP); |
caf971df | 1279 | can_collapse = (info->size_goal - skb->len > 0) && |
d9ca1de8 | 1280 | mptcp_skb_can_collapse_to(data_seq, skb, mpext); |
15e6ca97 | 1281 | if (!can_collapse) { |
57040755 | 1282 | TCP_SKB_CB(skb)->eor = 1; |
15e6ca97 PA |
1283 | } else { |
1284 | size_bias = skb->len; | |
caf971df | 1285 | avail_size = info->size_goal - skb->len; |
15e6ca97 | 1286 | } |
57040755 | 1287 | } |
18b683bf | 1288 | |
6f8a612a FW |
1289 | /* Zero window and all data acked? Probe. */ |
1290 | avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size); | |
1291 | if (avail_size == 0) { | |
7439d687 PA |
1292 | u64 snd_una = READ_ONCE(msk->snd_una); |
1293 | ||
1294 | if (skb || snd_una != msk->snd_nxt) | |
6f8a612a FW |
1295 | return 0; |
1296 | zero_window_probe = true; | |
7439d687 | 1297 | data_seq = snd_una - 1; |
6f8a612a FW |
1298 | avail_size = 1; |
1299 | } | |
1300 | ||
d9ca1de8 PA |
1301 | if (WARN_ON_ONCE(info->sent > info->limit || |
1302 | info->limit > dfrag->data_len)) | |
1303 | return 0; | |
d027236c | 1304 | |
d9ca1de8 | 1305 | ret = info->limit - info->sent; |
15e6ca97 PA |
1306 | tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags, |
1307 | dfrag->page, dfrag->offset + info->sent, &ret); | |
e2223995 PA |
1308 | if (!tail) { |
1309 | tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk)); | |
1310 | return -ENOMEM; | |
35759383 | 1311 | } |
18b683bf | 1312 | |
e2223995 | 1313 | /* if the tail skb is still the cached one, collapsing really happened. |
57040755 | 1314 | */ |
e2223995 | 1315 | if (skb == tail) { |
15e6ca97 | 1316 | TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH; |
57040755 | 1317 | mpext->data_len += ret; |
15e6ca97 | 1318 | WARN_ON_ONCE(!can_collapse); |
6f8a612a | 1319 | WARN_ON_ONCE(zero_window_probe); |
57040755 PA |
1320 | goto out; |
1321 | } | |
1322 | ||
724cfd2e PA |
1323 | mpext = skb_ext_find(tail, SKB_EXT_MPTCP); |
1324 | if (WARN_ON_ONCE(!mpext)) { | |
1325 | /* should never reach here, stream corrupted */ | |
1326 | return -EINVAL; | |
1327 | } | |
6d0060f6 MM |
1328 | |
1329 | memset(mpext, 0, sizeof(*mpext)); | |
d9ca1de8 | 1330 | mpext->data_seq = data_seq; |
6d0060f6 MM |
1331 | mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; |
1332 | mpext->data_len = ret; | |
1333 | mpext->use_map = 1; | |
1334 | mpext->dsn64 = 1; | |
1335 | ||
1336 | pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d", | |
1337 | mpext->data_seq, mpext->subflow_seq, mpext->data_len, | |
1338 | mpext->dsn64); | |
1339 | ||
6f8a612a FW |
1340 | if (zero_window_probe) { |
1341 | mptcp_subflow_ctx(ssk)->rel_write_seq += ret; | |
1342 | mpext->frozen = 1; | |
1343 | ret = 0; | |
1344 | tcp_push_pending_frames(ssk); | |
1345 | } | |
57040755 | 1346 | out: |
6d0060f6 | 1347 | mptcp_subflow_ctx(ssk)->rel_write_seq += ret; |
6d0060f6 MM |
1348 | return ret; |
1349 | } | |
1350 | ||
d5f49190 PA |
1351 | #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ |
1352 | sizeof(struct tcphdr) - \ | |
1353 | MAX_TCP_OPTION_SPACE - \ | |
1354 | sizeof(struct ipv6hdr) - \ | |
1355 | sizeof(struct frag_hdr)) | |
1356 | ||
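/* Transmit scheduling state: the scheduler below re-uses the last
 * subflow while its burst allowance lasts, otherwise it picks the
 * non-backup subflow with the lowest wmem/pacing-rate ratio, falling
 * back to a backup subflow only when no other subflow is active.
 */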
1357 | struct subflow_send_info { | |
1358 | struct sock *ssk; | |
1359 | u64 ratio; | |
1360 | }; | |
1361 | ||
da51aef5 PA |
1362 | static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk, |
1363 | u32 *sndbuf) | |
f296234c | 1364 | { |
d5f49190 | 1365 | struct subflow_send_info send_info[2]; |
f296234c | 1366 | struct mptcp_subflow_context *subflow; |
d5f49190 PA |
1367 | int i, nr_active = 0; |
1368 | struct sock *ssk; | |
1369 | u64 ratio; | |
1370 | u32 pace; | |
f296234c | 1371 | |
d5f49190 | 1372 | sock_owned_by_me((struct sock *)msk); |
f296234c | 1373 | |
da51aef5 | 1374 | *sndbuf = 0; |
d5f49190 PA |
1375 | if (__mptcp_check_fallback(msk)) { |
1376 | if (!msk->first) | |
f296234c | 1377 | return NULL; |
d5f49190 PA |
1378 | *sndbuf = msk->first->sk_sndbuf; |
1379 | return sk_stream_memory_free(msk->first) ? msk->first : NULL; | |
1380 | } | |
1381 | ||
1382 | /* re-use last subflow, if the burst allows that */ |
1383 | if (msk->last_snd && msk->snd_burst > 0 && | |
1384 | sk_stream_memory_free(msk->last_snd) && | |
1385 | mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) { | |
1386 | mptcp_for_each_subflow(msk, subflow) { | |
1387 | ssk = mptcp_subflow_tcp_sock(subflow); | |
1388 | *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf); | |
f296234c | 1389 | } |
d5f49190 PA |
1390 | return msk->last_snd; |
1391 | } | |
f296234c | 1392 | |
d5f49190 PA |
1393 | /* pick the subflow with the lower wmem/wspace ratio */ |
1394 | for (i = 0; i < 2; ++i) { | |
1395 | send_info[i].ssk = NULL; | |
1396 | send_info[i].ratio = -1; | |
1397 | } | |
1398 | mptcp_for_each_subflow(msk, subflow) { | |
1399 | ssk = mptcp_subflow_tcp_sock(subflow); | |
1400 | if (!mptcp_subflow_active(subflow)) | |
1401 | continue; | |
1402 | ||
1403 | nr_active += !subflow->backup; | |
da51aef5 | 1404 | *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf); |
d5f49190 PA |
1405 | if (!sk_stream_memory_free(subflow->tcp_sock)) |
1406 | continue; | |
f296234c | 1407 | |
d5f49190 PA |
1408 | pace = READ_ONCE(ssk->sk_pacing_rate); |
1409 | if (!pace) | |
f296234c | 1410 | continue; |
f296234c | 1411 | |
d5f49190 PA |
1412 | ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, |
1413 | pace); | |
1414 | if (ratio < send_info[subflow->backup].ratio) { | |
1415 | send_info[subflow->backup].ssk = ssk; | |
1416 | send_info[subflow->backup].ratio = ratio; | |
1417 | } | |
f296234c PK |
1418 | } |
1419 | ||
d5f49190 PA |
1420 | pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld", |
1421 | msk, nr_active, send_info[0].ssk, send_info[0].ratio, | |
1422 | send_info[1].ssk, send_info[1].ratio); | |
1423 | ||
1424 | /* pick the best backup if no other subflow is active */ | |
1425 | if (!nr_active) | |
1426 | send_info[0].ssk = send_info[1].ssk; | |
1427 | ||
1428 | if (send_info[0].ssk) { | |
1429 | msk->last_snd = send_info[0].ssk; | |
1430 | msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE, | |
1431 | sk_stream_wspace(msk->last_snd)); | |
1432 | return msk->last_snd; | |
1433 | } | |
1434 | return NULL; | |
f296234c PK |
1435 | } |
1436 | ||
d9ca1de8 PA |
1437 | static void mptcp_push_release(struct sock *sk, struct sock *ssk, |
1438 | struct mptcp_sendmsg_info *info) | |
1439 | { | |
1440 | mptcp_set_timeout(sk, ssk); | |
1441 | tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); | |
1442 | release_sock(ssk); | |
1443 | } | |
1444 | ||
3f8f491c | 1445 | static void __mptcp_push_pending(struct sock *sk, unsigned int flags) |
f870fa0b | 1446 | { |
d9ca1de8 | 1447 | struct sock *prev_ssk = NULL, *ssk = NULL; |
f870fa0b | 1448 | struct mptcp_sock *msk = mptcp_sk(sk); |
caf971df | 1449 | struct mptcp_sendmsg_info info = { |
d9ca1de8 | 1450 | .flags = flags, |
caf971df | 1451 | }; |
d9ca1de8 PA |
1452 | struct mptcp_data_frag *dfrag; |
1453 | int len, copied = 0; | |
1454 | u32 sndbuf; | |
1455 | ||
1456 | while ((dfrag = mptcp_send_head(sk))) { | |
1457 | info.sent = dfrag->already_sent; | |
1458 | info.limit = dfrag->data_len; | |
1459 | len = dfrag->data_len - dfrag->already_sent; | |
1460 | while (len > 0) { | |
1461 | int ret = 0; | |
1462 | ||
1463 | prev_ssk = ssk; | |
1464 | __mptcp_flush_join_list(msk); | |
1465 | ssk = mptcp_subflow_get_send(msk, &sndbuf); | |
1466 | ||
1467 | /* do auto tuning */ | |
1468 | if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && | |
1469 | sndbuf > READ_ONCE(sk->sk_sndbuf)) | |
1470 | WRITE_ONCE(sk->sk_sndbuf, sndbuf); | |
1471 | ||
1472 | /* try to keep the subflow socket lock across | |
1473 | * consecutive xmit on the same socket | |
1474 | */ | |
1475 | if (ssk != prev_ssk && prev_ssk) | |
1476 | mptcp_push_release(sk, prev_ssk, &info); | |
1477 | if (!ssk) | |
1478 | goto out; | |
1479 | ||
1480 | if (ssk != prev_ssk || !prev_ssk) | |
1481 | lock_sock(ssk); | |
1482 | ||
724cfd2e PA |
1483 | /* keep it simple and always provide a new skb for the |
1484 | * subflow, even if we will not use it when collapsing | |
1485 | * on the pending one | |
1486 | */ | |
1487 | if (!mptcp_alloc_tx_skb(sk, ssk)) { | |
1488 | mptcp_push_release(sk, ssk, &info); | |
1489 | goto out; | |
1490 | } | |
1491 | ||
d9ca1de8 PA |
1492 | ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); |
1493 | if (ret <= 0) { | |
1494 | mptcp_push_release(sk, ssk, &info); | |
1495 | goto out; | |
1496 | } | |
1497 | ||
1498 | info.sent += ret; | |
1499 | dfrag->already_sent += ret; | |
1500 | msk->snd_nxt += ret; | |
1501 | msk->snd_burst -= ret; | |
724cfd2e | 1502 | msk->tx_pending_data -= ret; |
d9ca1de8 PA |
1503 | copied += ret; |
1504 | len -= ret; | |
1505 | } | |
1506 | WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); | |
1507 | } | |
1508 | ||
1509 | /* at this point we still hold the socket lock for the last subflow we used */ |
1510 | if (ssk) | |
1511 | mptcp_push_release(sk, ssk, &info); | |
1512 | ||
1513 | out: | |
b680a214 PA |
1514 | if (copied) { |
1515 | /* start the timer, if it's not pending */ | |
1516 | if (!mptcp_timer_pending(sk)) | |
1517 | mptcp_reset_timer(sk); | |
d9ca1de8 | 1518 | __mptcp_check_send_data_fin(sk); |
b680a214 | 1519 | } |
d9ca1de8 PA |
1520 | } |
1521 | ||
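/* As __mptcp_push_pending(), but operating on a single, already locked
 * subflow and possibly in atomic context: skbs are allocated with
 * GFP_ATOMIC and the msk fwd memory is updated in place via
 * __mptcp_update_wmem(), since release_sock() will not flush it.
 */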
6e628cd3 PA |
1522 | static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) |
1523 | { | |
1524 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1525 | struct mptcp_sendmsg_info info; | |
1526 | struct mptcp_data_frag *dfrag; | |
1527 | int len, copied = 0; | |
1528 | ||
1529 | info.flags = 0; | |
1530 | while ((dfrag = mptcp_send_head(sk))) { | |
1531 | info.sent = dfrag->already_sent; | |
1532 | info.limit = dfrag->data_len; | |
1533 | len = dfrag->data_len - dfrag->already_sent; | |
1534 | while (len > 0) { | |
1535 | int ret = 0; | |
1536 | ||
1537 | /* do auto tuning */ | |
1538 | if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && | |
1539 | ssk->sk_sndbuf > READ_ONCE(sk->sk_sndbuf)) | |
1540 | WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf); | |
1541 | ||
1542 | if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) { | |
1543 | __mptcp_update_wmem(sk); | |
1544 | sk_mem_reclaim_partial(sk); | |
1545 | } | |
1546 | if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC)) | |
1547 | goto out; | |
1548 | ||
1549 | ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); | |
1550 | if (ret <= 0) | |
1551 | goto out; | |
1552 | ||
1553 | info.sent += ret; | |
1554 | dfrag->already_sent += ret; | |
1555 | msk->snd_nxt += ret; | |
1556 | msk->snd_burst -= ret; | |
1557 | msk->tx_pending_data -= ret; | |
1558 | copied += ret; | |
1559 | len -= ret; | |
1560 | } | |
1561 | WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); | |
1562 | } | |
1563 | ||
1564 | out: | |
1565 | /* __mptcp_alloc_tx_skb could have released some wmem and we are | |
1566 | * not going to flush it via release_sock() | |
1567 | */ | |
1568 | __mptcp_update_wmem(sk); | |
1569 | if (copied) { | |
1570 | mptcp_set_timeout(sk, ssk); | |
1571 | tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, | |
1572 | info.size_goal); | |
f6bb2471 PA |
1573 | if (!mptcp_timer_pending(sk)) |
1574 | mptcp_reset_timer(sk); | |
1575 | ||
6e628cd3 PA |
1576 | if (msk->snd_data_fin_enable && |
1577 | msk->snd_nxt + 1 == msk->write_seq) | |
1578 | mptcp_schedule_work(sk); | |
1579 | } | |
1580 | } | |
1581 | ||
d9ca1de8 PA |
1582 | static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
1583 | { | |
1584 | struct mptcp_sock *msk = mptcp_sk(sk); | |
17091708 | 1585 | struct page_frag *pfrag; |
6d0060f6 | 1586 | size_t copied = 0; |
caf971df | 1587 | int ret = 0; |
6d0060f6 | 1588 | long timeo; |
f870fa0b MM |
1589 | |
1590 | if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) | |
1591 | return -EOPNOTSUPP; | |
1592 | ||
e7579d5d | 1593 | mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len))); |
1954b860 MM |
1594 | |
1595 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | |
1596 | ||
1597 | if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { | |
1598 | ret = sk_stream_wait_connect(sk, &timeo); | |
1599 | if (ret) | |
1600 | goto out; | |
1601 | } | |
1602 | ||
17091708 | 1603 | pfrag = sk_page_frag(sk); |
18b683bf | 1604 | |
d9ca1de8 | 1605 | while (msg_data_left(msg)) { |
724cfd2e | 1606 | int total_ts, frag_truesize = 0; |
d9ca1de8 | 1607 | struct mptcp_data_frag *dfrag; |
724cfd2e | 1608 | struct sk_buff_head skbs; |
d9ca1de8 PA |
1609 | bool dfrag_collapsed; |
1610 | size_t psize, offset; | |
18b683bf | 1611 | |
d9ca1de8 PA |
1612 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { |
1613 | ret = -EPIPE; | |
f296234c PK |
1614 | goto out; |
1615 | } | |
da51aef5 | 1616 | |
d9ca1de8 PA |
1617 | /* reuse tail pfrag, if possible, or carve a new one from the |
1618 | * page allocator | |
1619 | */ | |
1620 | dfrag = mptcp_pending_tail(sk); | |
1621 | dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); | |
1622 | if (!dfrag_collapsed) { | |
6e628cd3 PA |
1623 | if (!sk_stream_memory_free(sk)) |
1624 | goto wait_for_memory; | |
1625 | ||
d9ca1de8 PA |
1626 | if (!mptcp_page_frag_refill(sk, pfrag)) |
1627 | goto wait_for_memory; | |
1628 | ||
1629 | dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); | |
1630 | frag_truesize = dfrag->overhead; | |
72511aab | 1631 | } |
6d0060f6 | 1632 | |
d9ca1de8 PA |
1633 | /* we do not bound vs wspace, to allow a single packet. |
1634 | * memory accounting will prevent excessive memory usage | |
1635 | * anyway | |
d5f49190 | 1636 | */ |
d9ca1de8 PA |
1637 | offset = dfrag->offset + dfrag->data_len; |
1638 | psize = pfrag->size - offset; | |
1639 | psize = min_t(size_t, psize, msg_data_left(msg)); | |
724cfd2e PA |
1640 | total_ts = psize + frag_truesize; |
1641 | __skb_queue_head_init(&skbs); | |
1642 | if (!mptcp_tx_cache_refill(sk, psize, &skbs, &total_ts)) | |
d9ca1de8 PA |
1643 | goto wait_for_memory; |
1644 | ||
724cfd2e PA |
1645 | if (!mptcp_wmem_alloc(sk, total_ts)) { |
1646 | __skb_queue_purge(&skbs); | |
1647 | goto wait_for_memory; | |
1648 | } | |
1649 | ||
1650 | skb_queue_splice_tail(&skbs, &msk->skb_tx_cache); | |
d9ca1de8 PA |
1651 | if (copy_page_from_iter(dfrag->page, offset, psize, |
1652 | &msg->msg_iter) != psize) { | |
87952603 | 1653 | mptcp_wmem_uncharge(sk, psize + frag_truesize); |
d9ca1de8 PA |
1654 | ret = -EFAULT; |
1655 | goto out; | |
72511aab FW |
1656 | } |
1657 | ||
d9ca1de8 PA |
1658 | /* data successfully copied into the write queue */ |
1659 | copied += psize; | |
1660 | dfrag->data_len += psize; | |
1661 | frag_truesize += psize; | |
1662 | pfrag->offset += frag_truesize; | |
1663 | WRITE_ONCE(msk->write_seq, msk->write_seq + psize); | |
13e16037 | 1664 | msk->tx_pending_data += psize; |
d9ca1de8 PA |
1665 | |
1666 | /* charge data on mptcp pending queue to the msk socket | |
1667 | * Note: we charge such data both to sk and ssk | |
fb529e62 | 1668 | */ |
d9ca1de8 | 1669 | sk_wmem_queued_add(sk, frag_truesize); |
d9ca1de8 PA |
1670 | if (!dfrag_collapsed) { |
1671 | get_page(dfrag->page); | |
1672 | list_add_tail(&dfrag->list, &msk->rtx_queue); | |
1673 | if (!msk->first_pending) | |
1674 | WRITE_ONCE(msk->first_pending, dfrag); | |
fb529e62 | 1675 | } |
d9ca1de8 PA |
1676 | pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk, |
1677 | dfrag->data_seq, dfrag->data_len, dfrag->already_sent, | |
1678 | !dfrag_collapsed); | |
6d0060f6 | 1679 | |
d9ca1de8 | 1680 | continue; |
b51f9b80 | 1681 | |
d9ca1de8 | 1682 | wait_for_memory: |
6e628cd3 | 1683 | set_bit(MPTCP_NOSPACE, &msk->flags); |
3f8f491c | 1684 | __mptcp_push_pending(sk, msg->msg_flags); |
d9ca1de8 PA |
1685 | ret = sk_stream_wait_memory(sk, &timeo); |
1686 | if (ret) | |
1687 | goto out; | |
57040755 | 1688 | } |
6d0060f6 | 1689 | |
13e16037 | 1690 | if (copied) |
3f8f491c | 1691 | __mptcp_push_pending(sk, msg->msg_flags); |
d9ca1de8 | 1692 | |
1954b860 | 1693 | out: |
cec37a6e | 1694 | release_sock(sk); |
8555c6bf | 1695 | return copied ? : ret; |
f870fa0b MM |
1696 | } |
1697 | ||
7a6a6cbc PA |
1698 | static void mptcp_wait_data(struct sock *sk, long *timeo) |
1699 | { | |
1700 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | |
1701 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1702 | ||
1703 | add_wait_queue(sk_sleep(sk), &wait); | |
1704 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); | |
1705 | ||
1706 | sk_wait_event(sk, timeo, | |
1707 | test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait); | |
1708 | ||
1709 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); | |
1710 | remove_wait_queue(sk_sleep(sk), &wait); | |
1711 | } | |
1712 | ||
6771bfd9 FW |
1713 | static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, |
1714 | struct msghdr *msg, | |
1715 | size_t len) | |
1716 | { | |
6771bfd9 FW |
1717 | struct sk_buff *skb; |
1718 | int copied = 0; | |
1719 | ||
87952603 | 1720 | while ((skb = skb_peek(&msk->receive_queue)) != NULL) { |
6771bfd9 FW |
1721 | u32 offset = MPTCP_SKB_CB(skb)->offset; |
1722 | u32 data_len = skb->len - offset; | |
1723 | u32 count = min_t(size_t, len - copied, data_len); | |
1724 | int err; | |
1725 | ||
1726 | err = skb_copy_datagram_msg(skb, offset, msg, count); | |
1727 | if (unlikely(err < 0)) { | |
1728 | if (!copied) | |
1729 | return err; | |
1730 | break; | |
1731 | } | |
1732 | ||
1733 | copied += count; | |
1734 | ||
1735 | if (count < data_len) { | |
1736 | MPTCP_SKB_CB(skb)->offset += count; | |
1737 | break; | |
1738 | } | |
1739 | ||
87952603 PA |
1740 | /* we will bulk release the skb memory later */ |
1741 | skb->destructor = NULL; | |
1742 | msk->rmem_released += skb->truesize; | |
1743 | __skb_unlink(skb, &msk->receive_queue); | |
6771bfd9 FW |
1744 | __kfree_skb(skb); |
1745 | ||
1746 | if (copied >= len) | |
1747 | break; | |
1748 | } | |
1749 | ||
1750 | return copied; | |
1751 | } | |
1752 | ||
a6b118fe FW |
1753 | /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. |
1754 | * | |
1755 | * Only difference: Use highest rtt estimate of the subflows in use. | |
1756 | */ | |
1757 | static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) | |
1758 | { | |
1759 | struct mptcp_subflow_context *subflow; | |
1760 | struct sock *sk = (struct sock *)msk; | |
1761 | u32 time, advmss = 1; | |
1762 | u64 rtt_us, mstamp; | |
1763 | ||
1764 | sock_owned_by_me(sk); | |
1765 | ||
1766 | if (copied <= 0) | |
1767 | return; | |
1768 | ||
1769 | msk->rcvq_space.copied += copied; | |
1770 | ||
1771 | mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); | |
1772 | time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); | |
1773 | ||
1774 | rtt_us = msk->rcvq_space.rtt_us; | |
1775 | if (rtt_us && time < (rtt_us >> 3)) | |
1776 | return; | |
1777 | ||
1778 | rtt_us = 0; | |
1779 | mptcp_for_each_subflow(msk, subflow) { | |
1780 | const struct tcp_sock *tp; | |
1781 | u64 sf_rtt_us; | |
1782 | u32 sf_advmss; | |
1783 | ||
1784 | tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); | |
1785 | ||
1786 | sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); | |
1787 | sf_advmss = READ_ONCE(tp->advmss); | |
1788 | ||
1789 | rtt_us = max(sf_rtt_us, rtt_us); | |
1790 | advmss = max(sf_advmss, advmss); | |
1791 | } | |
1792 | ||
1793 | msk->rcvq_space.rtt_us = rtt_us; | |
1794 | if (time < (rtt_us >> 3) || rtt_us == 0) | |
1795 | return; | |
1796 | ||
1797 | if (msk->rcvq_space.copied <= msk->rcvq_space.space) | |
1798 | goto new_measure; | |
1799 | ||
1800 | if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && | |
1801 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { | |
1802 | int rcvmem, rcvbuf; | |
1803 | u64 rcvwin, grow; | |
1804 | ||
1805 | rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; | |
1806 | ||
1807 | grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); | |
1808 | ||
1809 | do_div(grow, msk->rcvq_space.space); | |
1810 | rcvwin += (grow << 1); | |
1811 | ||
1812 | rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER); | |
1813 | while (tcp_win_from_space(sk, rcvmem) < advmss) | |
1814 | rcvmem += 128; | |
1815 | ||
1816 | do_div(rcvwin, advmss); | |
1817 | rcvbuf = min_t(u64, rcvwin * rcvmem, | |
1818 | sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); | |
1819 | ||
1820 | if (rcvbuf > sk->sk_rcvbuf) { | |
1821 | u32 window_clamp; | |
1822 | ||
1823 | window_clamp = tcp_win_from_space(sk, rcvbuf); | |
1824 | WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); | |
1825 | ||
1826 | /* Make subflows follow along. If we do not do this, we | |
1827 | * get drops at subflow level if skbs can't be moved to | |
1828 | * the mptcp rx queue fast enough (announced rcv_win can | |
1829 | * exceed ssk->sk_rcvbuf). | |
1830 | */ | |
1831 | mptcp_for_each_subflow(msk, subflow) { | |
1832 | struct sock *ssk; | |
c76c6956 | 1833 | bool slow; |
a6b118fe FW |
1834 | |
1835 | ssk = mptcp_subflow_tcp_sock(subflow); | |
c76c6956 | 1836 | slow = lock_sock_fast(ssk); |
a6b118fe FW |
1837 | WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); |
1838 | tcp_sk(ssk)->window_clamp = window_clamp; | |
c76c6956 PA |
1839 | tcp_cleanup_rbuf(ssk, 1); |
1840 | unlock_sock_fast(ssk, slow); | |
a6b118fe FW |
1841 | } |
1842 | } | |
1843 | } | |
1844 | ||
1845 | msk->rcvq_space.space = msk->rcvq_space.copied; | |
1846 | new_measure: | |
1847 | msk->rcvq_space.copied = 0; | |
1848 | msk->rcvq_space.time = mstamp; | |
1849 | } | |
1850 | ||
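The buffer-sizing arithmetic in mptcp_rcv_space_adjust() above is easier to follow with concrete numbers. The standalone sketch below mirrors the same computation in plain userspace C; it is only an illustration, and the per-segment truesize and the upper cap passed in are made-up stand-ins for what the kernel derives from SKB_TRUESIZE() and the tcp_rmem[2] sysctl.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the sizing above: rcvwin starts at twice the bytes copied in the
 * last round-trip plus 16 segments of slack, grows in proportion to how much
 * the new measurement exceeded the old one, and is then converted to buffer
 * bytes and clamped. Callers only do this when copied > old_space.
 */
static uint64_t rcvbuf_sketch(uint64_t copied, uint64_t old_space,
			      uint64_t advmss, uint64_t rcvmem_per_seg,
			      uint64_t rmem_max)
{
	uint64_t rcvwin = (copied << 1) + 16 * advmss;
	uint64_t grow = rcvwin * (copied - old_space) / old_space;
	uint64_t rcvbuf;

	rcvwin += grow << 1;
	rcvbuf = (rcvwin / advmss) * rcvmem_per_seg;
	return rcvbuf < rmem_max ? rcvbuf : rmem_max;
}

int main(void)
{
	/* e.g. 256 KiB copied vs. 128 KiB previously measured, 1460-byte MSS,
	 * assumed 2304 bytes of truesize per segment, 6 MiB cap
	 */
	printf("new rcvbuf: %llu bytes\n", (unsigned long long)
	       rcvbuf_sketch(256 * 1024, 128 * 1024, 1460, 2304, 6 << 20));
	return 0;
}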
87952603 PA |
1851 | static void __mptcp_update_rmem(struct sock *sk) |
1852 | { | |
1853 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1854 | ||
1855 | if (!msk->rmem_released) | |
1856 | return; | |
1857 | ||
1858 | atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); | |
1859 | sk_mem_uncharge(sk, msk->rmem_released); | |
1860 | msk->rmem_released = 0; | |
1861 | } | |
1862 | ||
1863 | static void __mptcp_splice_receive_queue(struct sock *sk) | |
1864 | { | |
1865 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1866 | ||
1867 | skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); | |
1868 | } | |
1869 | ||
ea4ca586 | 1870 | static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv) |
6771bfd9 | 1871 | { |
87952603 | 1872 | struct sock *sk = (struct sock *)msk; |
6771bfd9 | 1873 | unsigned int moved = 0; |
87952603 | 1874 | bool ret, done; |
d5f49190 PA |
1875 | |
1876 | __mptcp_flush_join_list(msk); | |
6771bfd9 FW |
1877 | do { |
1878 | struct sock *ssk = mptcp_subflow_recv_lookup(msk); | |
65f49fe7 | 1879 | bool slowpath; |
6771bfd9 | 1880 | |
87952603 PA |
1881 | /* we can have data pending in the subflows only if the msk |
1882 | * receive buffer was full at subflow_data_ready() time, | |
1883 | * that is an unlikely slow path. | |
1884 | */ | |
1885 | if (likely(!ssk)) | |
6771bfd9 FW |
1886 | break; |
1887 | ||
65f49fe7 | 1888 | slowpath = lock_sock_fast(ssk); |
87952603 | 1889 | mptcp_data_lock(sk); |
6771bfd9 | 1890 | done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); |
87952603 | 1891 | mptcp_data_unlock(sk); |
ea4ca586 PA |
1892 | if (moved && rcv) { |
1893 | WRITE_ONCE(msk->rmem_pending, min(rcv, moved)); | |
1894 | tcp_cleanup_rbuf(ssk, 1); | |
1895 | WRITE_ONCE(msk->rmem_pending, 0); | |
1896 | } | |
65f49fe7 | 1897 | unlock_sock_fast(ssk, slowpath); |
6771bfd9 FW |
1898 | } while (!done); |
1899 | ||
87952603 PA |
1900 | /* acquire the data lock only if some input data is pending */ |
1901 | ret = moved > 0; | |
1902 | if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) || | |
1903 | !skb_queue_empty_lockless(&sk->sk_receive_queue)) { | |
1904 | mptcp_data_lock(sk); | |
1905 | __mptcp_update_rmem(sk); | |
1906 | ret |= __mptcp_ofo_queue(msk); | |
1907 | __mptcp_splice_receive_queue(sk); | |
1908 | mptcp_data_unlock(sk); | |
ab174ad8 | 1909 | } |
87952603 PA |
1910 | if (ret) |
1911 | mptcp_check_data_fin((struct sock *)msk); | |
1912 | return !skb_queue_empty(&msk->receive_queue); | |
6771bfd9 FW |
1913 | } |
1914 | ||
f870fa0b MM |
1915 | static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, |
1916 | int nonblock, int flags, int *addr_len) | |
1917 | { | |
1918 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 1919 | int copied = 0; |
7a6a6cbc PA |
1920 | int target; |
1921 | long timeo; | |
f870fa0b MM |
1922 | |
1923 | if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT)) | |
1924 | return -EOPNOTSUPP; | |
1925 | ||
87952603 | 1926 | mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk)); |
fd897679 PA |
1927 | if (unlikely(sk->sk_state == TCP_LISTEN)) { |
1928 | copied = -ENOTCONN; | |
1929 | goto out_err; | |
1930 | } | |
1931 | ||
7a6a6cbc PA |
1932 | timeo = sock_rcvtimeo(sk, nonblock); |
1933 | ||
1934 | len = min_t(size_t, len, INT_MAX); | |
1935 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | |
1936 | ||
05e3ecea | 1937 | while (copied < len) { |
ea4ca586 | 1938 | int bytes_read, old_space; |
7a6a6cbc | 1939 | |
6771bfd9 FW |
1940 | bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied); |
1941 | if (unlikely(bytes_read < 0)) { | |
1942 | if (!copied) | |
1943 | copied = bytes_read; | |
1944 | goto out_err; | |
1945 | } | |
7a6a6cbc | 1946 | |
6771bfd9 | 1947 | copied += bytes_read; |
7a6a6cbc | 1948 | |
87952603 | 1949 | if (skb_queue_empty(&msk->receive_queue) && |
ea4ca586 | 1950 | __mptcp_move_skbs(msk, len - copied)) |
6771bfd9 | 1951 | continue; |
7a6a6cbc | 1952 | |
ea4ca586 PA |
1953 | /* be sure to advertise window change */ |
1954 | old_space = READ_ONCE(msk->old_wspace); | |
1955 | if ((tcp_space(sk) - old_space) >= old_space) | |
fd897679 | 1956 | mptcp_cleanup_rbuf(msk); |
ea4ca586 | 1957 | |
7a6a6cbc PA |
1958 | /* only the master socket status is relevant here. The exit |
1959 | * conditions closely mirror tcp_recvmsg() | |
1960 | */ | |
1961 | if (copied >= target) | |
1962 | break; | |
1963 | ||
1964 | if (copied) { | |
1965 | if (sk->sk_err || | |
1966 | sk->sk_state == TCP_CLOSE || | |
1967 | (sk->sk_shutdown & RCV_SHUTDOWN) || | |
1968 | !timeo || | |
1969 | signal_pending(current)) | |
1970 | break; | |
1971 | } else { | |
1972 | if (sk->sk_err) { | |
1973 | copied = sock_error(sk); | |
1974 | break; | |
1975 | } | |
1976 | ||
5969856a PA |
1977 | if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) |
1978 | mptcp_check_for_eof(msk); | |
1979 | ||
87952603 PA |
1980 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
1981 | /* race breaker: the shutdown could be after the | |
1982 | * previous receive queue check | |
1983 | */ | |
1984 | if (__mptcp_move_skbs(msk, len - copied)) | |
1985 | continue; | |
7a6a6cbc | 1986 | break; |
87952603 | 1987 | } |
7a6a6cbc PA |
1988 | |
1989 | if (sk->sk_state == TCP_CLOSE) { | |
1990 | copied = -ENOTCONN; | |
1991 | break; | |
1992 | } | |
1993 | ||
1994 | if (!timeo) { | |
1995 | copied = -EAGAIN; | |
1996 | break; | |
1997 | } | |
1998 | ||
1999 | if (signal_pending(current)) { | |
2000 | copied = sock_intr_errno(timeo); | |
2001 | break; | |
2002 | } | |
2003 | } | |
2004 | ||
2005 | pr_debug("block timeout %ld", timeo); | |
7a6a6cbc | 2006 | mptcp_wait_data(sk, &timeo); |
cec37a6e PK |
2007 | } |
2008 | ||
87952603 PA |
2009 | if (skb_queue_empty_lockless(&sk->sk_receive_queue) && |
2010 | skb_queue_empty(&msk->receive_queue)) { | |
6771bfd9 | 2011 | /* entire backlog drained, clear DATA_READY. */ |
7a6a6cbc | 2012 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cec37a6e | 2013 | |
6771bfd9 FW |
2014 | /* .. race-breaker: ssk might have gotten new data |
2015 | * after last __mptcp_move_skbs() returned false. | |
7a6a6cbc | 2016 | */ |
ea4ca586 | 2017 | if (unlikely(__mptcp_move_skbs(msk, 0))) |
7a6a6cbc | 2018 | set_bit(MPTCP_DATA_READY, &msk->flags); |
6771bfd9 FW |
2019 | } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) { |
2020 | /* data to read but mptcp_wait_data() cleared DATA_READY */ | |
2021 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
7a6a6cbc | 2022 | } |
6771bfd9 | 2023 | out_err: |
6719331c PA |
2024 | pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d", |
2025 | msk, test_bit(MPTCP_DATA_READY, &msk->flags), | |
87952603 | 2026 | skb_queue_empty_lockless(&sk->sk_receive_queue), copied); |
a6b118fe FW |
2027 | mptcp_rcv_space_adjust(msk, copied); |
2028 | ||
7a6a6cbc | 2029 | release_sock(sk); |
cec37a6e PK |
2030 | return copied; |
2031 | } | |
2032 | ||
b51f9b80 PA |
2033 | static void mptcp_retransmit_handler(struct sock *sk) |
2034 | { | |
2035 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2036 | ||
7439d687 PA |
2037 | set_bit(MPTCP_WORK_RTX, &msk->flags); |
2038 | mptcp_schedule_work(sk); | |
b51f9b80 PA |
2039 | } |
2040 | ||
2041 | static void mptcp_retransmit_timer(struct timer_list *t) | |
2042 | { | |
2043 | struct inet_connection_sock *icsk = from_timer(icsk, t, | |
2044 | icsk_retransmit_timer); | |
2045 | struct sock *sk = &icsk->icsk_inet.sk; | |
2046 | ||
2047 | bh_lock_sock(sk); | |
2048 | if (!sock_owned_by_user(sk)) { | |
2049 | mptcp_retransmit_handler(sk); | |
2050 | } else { | |
2051 | /* delegate our work to tcp_release_cb() */ | |
2052 | if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, | |
2053 | &sk->sk_tsq_flags)) | |
2054 | sock_hold(sk); | |
2055 | } | |
2056 | bh_unlock_sock(sk); | |
2057 | sock_put(sk); | |
2058 | } | |
2059 | ||
e16163b6 PA |
2060 | static void mptcp_timeout_timer(struct timer_list *t) |
2061 | { | |
2062 | struct sock *sk = from_timer(sk, t, sk_timer); | |
2063 | ||
2064 | mptcp_schedule_work(sk); | |
b6d69fc8 | 2065 | sock_put(sk); |
e16163b6 PA |
2066 | } |
2067 | ||
3b1d6210 PA |
2068 | /* Find an idle subflow. Return NULL if there is unacked data at tcp |
2069 | * level. | |
2070 | * | |
2071 | * A backup subflow is returned only if that is the only kind available. | |
2072 | */ | |
2073 | static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk) | |
2074 | { | |
2075 | struct mptcp_subflow_context *subflow; | |
2076 | struct sock *backup = NULL; | |
2077 | ||
2078 | sock_owned_by_me((const struct sock *)msk); | |
2079 | ||
d5f49190 | 2080 | if (__mptcp_check_fallback(msk)) |
d9ca1de8 | 2081 | return NULL; |
d5f49190 | 2082 | |
3b1d6210 PA |
2083 | mptcp_for_each_subflow(msk, subflow) { |
2084 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
2085 | ||
d5f49190 PA |
2086 | if (!mptcp_subflow_active(subflow)) |
2087 | continue; | |
2088 | ||
3b1d6210 | 2089 | /* still data outstanding at TCP level? Don't retransmit. */ |
860975c6 FW |
2090 | if (!tcp_write_queue_empty(ssk)) { |
2091 | if (inet_csk(ssk)->icsk_ca_state >= TCP_CA_Loss) | |
2092 | continue; | |
3b1d6210 | 2093 | return NULL; |
860975c6 | 2094 | } |
3b1d6210 PA |
2095 | |
2096 | if (subflow->backup) { | |
2097 | if (!backup) | |
2098 | backup = ssk; | |
2099 | continue; | |
2100 | } | |
2101 | ||
2102 | return ssk; | |
2103 | } | |
2104 | ||
2105 | return backup; | |
2106 | } | |
2107 | ||
ba30dc32 FW |
2108 | static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk) |
2109 | { | |
2110 | if (msk->subflow) { | |
2111 | iput(SOCK_INODE(msk->subflow)); | |
2112 | msk->subflow = NULL; | |
2113 | } | |
2114 | } | |
2115 | ||
cec37a6e PK |
2116 | /* subflow sockets can be either outgoing (connect) or incoming |
2117 | * (accept). | |
2118 | * | |
2119 | * Outgoing subflows use in-kernel sockets. | |
2120 | * Incoming subflows do not have their own 'struct socket' allocated, | |
2121 | * so we need to use tcp_close() after detaching them from the mptcp | |
2122 | * parent socket. | |
2123 | */ | |
d0876b22 | 2124 | void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, |
e16163b6 | 2125 | struct mptcp_subflow_context *subflow) |
cec37a6e | 2126 | { |
8469f8cc FW |
2127 | struct mptcp_sock *msk = mptcp_sk(sk); |
2128 | ||
cec37a6e PK |
2129 | list_del(&subflow->node); |
2130 | ||
3f8b2667 | 2131 | lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); |
e16163b6 PA |
2132 | |
2133 | /* if we are invoked by the msk cleanup code, the subflow is | |
2134 | * already orphaned | |
2135 | */ | |
133f0169 | 2136 | if (ssk->sk_socket) |
e16163b6 | 2137 | sock_orphan(ssk); |
e16163b6 | 2138 | |
d7b1bfd0 PA |
2139 | subflow->disposable = 1; |
2140 | ||
e16163b6 PA |
2141 | /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops; | |
2142 | * the ssk has already been destroyed, we just need to release the | |
2143 | * reference owned by msk; | |
2144 | */ | |
2145 | if (!inet_csk(ssk)->icsk_ulp_ops) { | |
2146 | kfree_rcu(subflow, rcu); | |
cec37a6e | 2147 | } else { |
d7b1bfd0 | 2148 | /* otherwise tcp will dispose of the ssk and subflow ctx */ |
e16163b6 PA |
2149 | __tcp_close(ssk, 0); |
2150 | ||
2151 | /* close acquired an extra ref */ | |
2152 | __sock_put(ssk); | |
cec37a6e | 2153 | } |
e16163b6 | 2154 | release_sock(ssk); |
e16163b6 PA |
2155 | |
2156 | sock_put(ssk); | |
8469f8cc FW |
2157 | |
2158 | if (ssk == msk->last_snd) | |
2159 | msk->last_snd = NULL; | |
ba30dc32 FW |
2160 | |
2161 | if (msk->subflow && ssk == msk->subflow->sk) | |
2162 | mptcp_dispose_initial_subflow(msk); | |
f870fa0b MM |
2163 | } |
2164 | ||
dc24f8b4 PA |
2165 | static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) |
2166 | { | |
2167 | return 0; | |
2168 | } | |
2169 | ||
b416268b FW |
2170 | static void pm_work(struct mptcp_sock *msk) |
2171 | { | |
2172 | struct mptcp_pm_data *pm = &msk->pm; | |
2173 | ||
2174 | spin_lock_bh(&msk->pm.lock); | |
2175 | ||
2176 | pr_debug("msk=%p status=%x", msk, pm->status); | |
2177 | if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { | |
2178 | pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); | |
2179 | mptcp_pm_nl_add_addr_received(msk); | |
2180 | } | |
84dfe367 GT |
2181 | if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { |
2182 | pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); | |
2183 | mptcp_pm_nl_add_addr_send_ack(msk); | |
2184 | } | |
d0876b22 GT |
2185 | if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { |
2186 | pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); | |
2187 | mptcp_pm_nl_rm_addr_received(msk); | |
2188 | } | |
b416268b FW |
2189 | if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { |
2190 | pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); | |
2191 | mptcp_pm_nl_fully_established(msk); | |
2192 | } | |
2193 | if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { | |
2194 | pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); | |
2195 | mptcp_pm_nl_subflow_established(msk); | |
2196 | } | |
2197 | ||
2198 | spin_unlock_bh(&msk->pm.lock); | |
2199 | } | |
2200 | ||
0e4f35d7 PA |
2201 | static void __mptcp_close_subflow(struct mptcp_sock *msk) |
2202 | { | |
2203 | struct mptcp_subflow_context *subflow, *tmp; | |
2204 | ||
51b25e09 FW |
2205 | might_sleep(); |
2206 | ||
0e4f35d7 PA |
2207 | list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { |
2208 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
2209 | ||
2210 | if (inet_sk_state_load(ssk) != TCP_CLOSE) | |
2211 | continue; | |
2212 | ||
e16163b6 | 2213 | __mptcp_close_ssk((struct sock *)msk, ssk, subflow); |
0e4f35d7 PA |
2214 | } |
2215 | } | |
2216 | ||
e16163b6 PA |
2217 | static bool mptcp_check_close_timeout(const struct sock *sk) |
2218 | { | |
2219 | s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp; | |
2220 | struct mptcp_subflow_context *subflow; | |
2221 | ||
2222 | if (delta >= TCP_TIMEWAIT_LEN) | |
2223 | return true; | |
2224 | ||
2225 | /* if all subflows are in closed status don't bother with additional | |
2226 | * timeout | |
2227 | */ | |
2228 | mptcp_for_each_subflow(mptcp_sk(sk), subflow) { | |
2229 | if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) != | |
2230 | TCP_CLOSE) | |
2231 | return false; | |
2232 | } | |
2233 | return true; | |
2234 | } | |
2235 | ||
50c504a2 FW |
2236 | static void mptcp_check_fastclose(struct mptcp_sock *msk) |
2237 | { | |
2238 | struct mptcp_subflow_context *subflow, *tmp; | |
2239 | struct sock *sk = &msk->sk.icsk_inet.sk; | |
2240 | ||
2241 | if (likely(!READ_ONCE(msk->rcv_fastclose))) | |
2242 | return; | |
2243 | ||
2244 | mptcp_token_destroy(msk); | |
2245 | ||
2246 | list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { | |
2247 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
2248 | ||
2249 | lock_sock(tcp_sk); | |
2250 | if (tcp_sk->sk_state != TCP_CLOSE) { | |
2251 | tcp_send_active_reset(tcp_sk, GFP_ATOMIC); | |
2252 | tcp_set_state(tcp_sk, TCP_CLOSE); | |
2253 | } | |
2254 | release_sock(tcp_sk); | |
2255 | } | |
2256 | ||
2257 | inet_sk_state_store(sk, TCP_CLOSE); | |
2258 | sk->sk_shutdown = SHUTDOWN_MASK; | |
2259 | smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ | |
2260 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
2261 | set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); | |
2262 | ||
2263 | mptcp_close_wake_up(sk); | |
2264 | } | |
2265 | ||
80992017 PA |
2266 | static void mptcp_worker(struct work_struct *work) |
2267 | { | |
2268 | struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); | |
3b1d6210 | 2269 | struct sock *ssk, *sk = &msk->sk.icsk_inet.sk; |
caf971df | 2270 | struct mptcp_sendmsg_info info = {}; |
3b1d6210 | 2271 | struct mptcp_data_frag *dfrag; |
3b1d6210 | 2272 | size_t copied = 0; |
e16163b6 | 2273 | int state, ret; |
80992017 PA |
2274 | |
2275 | lock_sock(sk); | |
e16163b6 PA |
2276 | state = sk->sk_state; |
2277 | if (unlikely(state == TCP_CLOSE)) | |
2278 | goto unlock; | |
2279 | ||
43b54c6e | 2280 | mptcp_check_data_fin_ack(sk); |
ec3edaa7 | 2281 | __mptcp_flush_join_list(msk); |
50c504a2 FW |
2282 | |
2283 | mptcp_check_fastclose(msk); | |
2284 | ||
0e4f35d7 PA |
2285 | if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) |
2286 | __mptcp_close_subflow(msk); | |
2287 | ||
b416268b FW |
2288 | if (msk->pm.status) |
2289 | pm_work(msk); | |
2290 | ||
59832e24 FW |
2291 | if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) |
2292 | mptcp_check_for_eof(msk); | |
2293 | ||
6e628cd3 | 2294 | __mptcp_check_send_data_fin(sk); |
43b54c6e MM |
2295 | mptcp_check_data_fin(sk); |
2296 | ||
d4fea39d PA |
2297 | /* There is no point in keeping around an orphaned sk that timed out or | |
2298 | * closed, but we need the msk around to reply to incoming DATA_FIN, | |
2299 | * even if it is orphaned and in FIN_WAIT2 state | |
e16163b6 PA |
2300 | */ |
2301 | if (sock_flag(sk, SOCK_DEAD) && | |
d4fea39d | 2302 | (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) { |
e16163b6 PA |
2303 | inet_sk_state_store(sk, TCP_CLOSE); |
2304 | __mptcp_destroy_sock(sk); | |
2305 | goto unlock; | |
2306 | } | |
2307 | ||
3b1d6210 PA |
2308 | if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) |
2309 | goto unlock; | |
2310 | ||
0518eb20 | 2311 | __mptcp_clean_una(sk); |
3b1d6210 PA |
2312 | dfrag = mptcp_rtx_head(sk); |
2313 | if (!dfrag) | |
2314 | goto unlock; | |
2315 | ||
2316 | ssk = mptcp_subflow_get_retrans(msk); | |
2317 | if (!ssk) | |
2318 | goto reset_unlock; | |
2319 | ||
2320 | lock_sock(ssk); | |
2321 | ||
d9ca1de8 PA |
2322 | /* limit retransmission to the bytes already sent on some subflows */ |
2323 | info.sent = 0; | |
2324 | info.limit = dfrag->already_sent; | |
2325 | while (info.sent < dfrag->already_sent) { | |
724cfd2e PA |
2326 | if (!mptcp_alloc_tx_skb(sk, ssk)) |
2327 | break; | |
2328 | ||
d9ca1de8 | 2329 | ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); |
6f8a612a | 2330 | if (ret <= 0) |
3b1d6210 PA |
2331 | break; |
2332 | ||
fc518953 | 2333 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); |
3b1d6210 | 2334 | copied += ret; |
d9ca1de8 | 2335 | info.sent += ret; |
3b1d6210 PA |
2336 | } |
2337 | if (copied) | |
caf971df PA |
2338 | tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, |
2339 | info.size_goal); | |
3b1d6210 | 2340 | |
3b1d6210 PA |
2341 | mptcp_set_timeout(sk, ssk); |
2342 | release_sock(ssk); | |
2343 | ||
2344 | reset_unlock: | |
2345 | if (!mptcp_timer_pending(sk)) | |
2346 | mptcp_reset_timer(sk); | |
2347 | ||
2348 | unlock: | |
80992017 PA |
2349 | release_sock(sk); |
2350 | sock_put(sk); | |
2351 | } | |
2352 | ||
784325e9 | 2353 | static int __mptcp_init_sock(struct sock *sk) |
f870fa0b | 2354 | { |
cec37a6e PK |
2355 | struct mptcp_sock *msk = mptcp_sk(sk); |
2356 | ||
ec3edaa7 PK |
2357 | spin_lock_init(&msk->join_list_lock); |
2358 | ||
cec37a6e | 2359 | INIT_LIST_HEAD(&msk->conn_list); |
ec3edaa7 | 2360 | INIT_LIST_HEAD(&msk->join_list); |
18b683bf | 2361 | INIT_LIST_HEAD(&msk->rtx_queue); |
80992017 | 2362 | INIT_WORK(&msk->work, mptcp_worker); |
87952603 | 2363 | __skb_queue_head_init(&msk->receive_queue); |
724cfd2e | 2364 | __skb_queue_head_init(&msk->skb_tx_cache); |
ab174ad8 | 2365 | msk->out_of_order_queue = RB_ROOT; |
f0e6a4cf | 2366 | msk->first_pending = NULL; |
e93da928 | 2367 | msk->wmem_reserved = 0; |
87952603 | 2368 | msk->rmem_released = 0; |
724cfd2e PA |
2369 | msk->tx_pending_data = 0; |
2370 | msk->size_goal_cache = TCP_BASE_MSS; | |
cec37a6e | 2371 | |
ea4ca586 | 2372 | msk->ack_hint = NULL; |
8ab183de | 2373 | msk->first = NULL; |
dc24f8b4 | 2374 | inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; |
8ab183de | 2375 | |
1b1c7a0e PK |
2376 | mptcp_pm_data_init(msk); |
2377 | ||
b51f9b80 PA |
2378 | /* re-use the csk retrans timer for MPTCP-level retrans */ |
2379 | timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); | |
e16163b6 | 2380 | timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0); |
f870fa0b MM |
2381 | return 0; |
2382 | } | |
2383 | ||
784325e9 MB |
2384 | static int mptcp_init_sock(struct sock *sk) |
2385 | { | |
fc518953 FW |
2386 | struct net *net = sock_net(sk); |
2387 | int ret; | |
18b683bf | 2388 | |
b6c08380 GT |
2389 | ret = __mptcp_init_sock(sk); |
2390 | if (ret) | |
2391 | return ret; | |
2392 | ||
fc518953 FW |
2393 | if (!mptcp_is_enabled(net)) |
2394 | return -ENOPROTOOPT; | |
2395 | ||
2396 | if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) | |
2397 | return -ENOMEM; | |
2398 | ||
fa68018d PA |
2399 | ret = __mptcp_socket_create(mptcp_sk(sk)); |
2400 | if (ret) | |
2401 | return ret; | |
2402 | ||
d027236c | 2403 | sk_sockets_allocated_inc(sk); |
a6b118fe | 2404 | sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; |
da51aef5 | 2405 | sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; |
d027236c | 2406 | |
18b683bf PA |
2407 | return 0; |
2408 | } | |
2409 | ||
2410 | static void __mptcp_clear_xmit(struct sock *sk) | |
2411 | { | |
2412 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2413 | struct mptcp_data_frag *dtmp, *dfrag; | |
724cfd2e | 2414 | struct sk_buff *skb; |
18b683bf | 2415 | |
d9ca1de8 | 2416 | WRITE_ONCE(msk->first_pending, NULL); |
18b683bf | 2417 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) |
d027236c | 2418 | dfrag_clear(sk, dfrag); |
724cfd2e PA |
2419 | while ((skb = __skb_dequeue(&msk->skb_tx_cache)) != NULL) { |
2420 | sk->sk_forward_alloc += skb->truesize; | |
2421 | kfree_skb(skb); | |
2422 | } | |
784325e9 MB |
2423 | } |
2424 | ||
80992017 PA |
2425 | static void mptcp_cancel_work(struct sock *sk) |
2426 | { | |
2427 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2428 | ||
b2771d24 | 2429 | if (cancel_work_sync(&msk->work)) |
e16163b6 | 2430 | __sock_put(sk); |
80992017 PA |
2431 | } |
2432 | ||
d0876b22 | 2433 | void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) |
21498490 PK |
2434 | { |
2435 | lock_sock(ssk); | |
2436 | ||
2437 | switch (ssk->sk_state) { | |
2438 | case TCP_LISTEN: | |
2439 | if (!(how & RCV_SHUTDOWN)) | |
2440 | break; | |
df561f66 | 2441 | fallthrough; |
21498490 PK |
2442 | case TCP_SYN_SENT: |
2443 | tcp_disconnect(ssk, O_NONBLOCK); | |
2444 | break; | |
2445 | default: | |
43b54c6e MM |
2446 | if (__mptcp_check_fallback(mptcp_sk(sk))) { |
2447 | pr_debug("Fallback"); | |
2448 | ssk->sk_shutdown |= how; | |
2449 | tcp_shutdown(ssk, how); | |
2450 | } else { | |
2451 | pr_debug("Sending DATA_FIN on subflow %p", ssk); | |
2452 | mptcp_set_timeout(sk, ssk); | |
2453 | tcp_send_ack(ssk); | |
2454 | } | |
21498490 PK |
2455 | break; |
2456 | } | |
2457 | ||
21498490 PK |
2458 | release_sock(ssk); |
2459 | } | |
2460 | ||
6920b851 MM |
2461 | static const unsigned char new_state[16] = { |
2462 | /* current state: new state: action: */ | |
2463 | [0 /* (Invalid) */] = TCP_CLOSE, | |
2464 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
2465 | [TCP_SYN_SENT] = TCP_CLOSE, | |
2466 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
2467 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, | |
2468 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, | |
2469 | [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ | |
2470 | [TCP_CLOSE] = TCP_CLOSE, | |
2471 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, | |
2472 | [TCP_LAST_ACK] = TCP_LAST_ACK, | |
2473 | [TCP_LISTEN] = TCP_CLOSE, | |
2474 | [TCP_CLOSING] = TCP_CLOSING, | |
2475 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ | |
2476 | }; | |
2477 | ||
2478 | static int mptcp_close_state(struct sock *sk) | |
2479 | { | |
2480 | int next = (int)new_state[sk->sk_state]; | |
2481 | int ns = next & TCP_STATE_MASK; | |
2482 | ||
2483 | inet_sk_state_store(sk, ns); | |
2484 | ||
2485 | return next & TCP_ACTION_FIN; | |
2486 | } | |
2487 | ||
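Each new_state[] entry above packs the next socket state into the low bits and an "emit a (DATA_)FIN" flag into a high bit; mptcp_close_state() splits them apart with TCP_STATE_MASK and TCP_ACTION_FIN. A tiny standalone sketch of that decode, using placeholder bit values rather than the kernel's real TCP_* constants:

#include <stdio.h>

#define ST_MASK      0x0f	/* placeholder for TCP_STATE_MASK */
#define ACTION_FIN   0x80	/* placeholder for TCP_ACTION_FIN */
#define ST_FIN_WAIT1    4	/* placeholder state number      */

int main(void)
{
	/* analogous to the ESTABLISHED entry: go to FIN_WAIT1 and queue a FIN */
	unsigned char next = ST_FIN_WAIT1 | ACTION_FIN;

	printf("next state %d, send FIN: %s\n",
	       next & ST_MASK, (next & ACTION_FIN) ? "yes" : "no");
	return 0;
}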
e16163b6 | 2488 | static void __mptcp_check_send_data_fin(struct sock *sk) |
f870fa0b | 2489 | { |
e16163b6 | 2490 | struct mptcp_subflow_context *subflow; |
f870fa0b MM |
2491 | struct mptcp_sock *msk = mptcp_sk(sk); |
2492 | ||
e16163b6 PA |
2493 | pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu", |
2494 | msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), | |
2495 | msk->snd_nxt, msk->write_seq); | |
43b54c6e | 2496 | |
e16163b6 PA |
2497 | /* either we still need to enqueue data on some subflow, or we are not | |
2498 | * really shutting down: skip this | |
2499 | */ | |
2500 | if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || | |
2501 | mptcp_send_head(sk)) | |
2502 | return; | |
2503 | ||
2504 | WRITE_ONCE(msk->snd_nxt, msk->write_seq); | |
2505 | ||
26aa2314 PA |
2506 | /* fallback socket will not get data_fin/ack, can move to the next |
2507 | * state now | |
2508 | */ | |
2509 | if (__mptcp_check_fallback(msk)) { | |
2510 | if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { | |
2511 | inet_sk_state_store(sk, TCP_CLOSE); | |
2512 | mptcp_close_wake_up(sk); | |
2513 | } else if (sk->sk_state == TCP_FIN_WAIT1) { | |
2514 | inet_sk_state_store(sk, TCP_FIN_WAIT2); | |
2515 | } | |
43b54c6e MM |
2516 | } |
2517 | ||
e16163b6 PA |
2518 | __mptcp_flush_join_list(msk); |
2519 | mptcp_for_each_subflow(msk, subflow) { | |
2520 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
43b54c6e | 2521 | |
e16163b6 | 2522 | mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); |
43b54c6e | 2523 | } |
e16163b6 | 2524 | } |
2c22c06c | 2525 | |
e16163b6 PA |
2526 | static void __mptcp_wr_shutdown(struct sock *sk) |
2527 | { | |
2528 | struct mptcp_sock *msk = mptcp_sk(sk); | |
43b54c6e | 2529 | |
e16163b6 PA |
2530 | pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d", |
2531 | msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, | |
2532 | !!mptcp_send_head(sk)); | |
2533 | ||
2534 | /* will be ignored by fallback sockets */ | |
2535 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); | |
2536 | WRITE_ONCE(msk->snd_data_fin_enable, 1); | |
2537 | ||
2538 | __mptcp_check_send_data_fin(sk); | |
2539 | } | |
2540 | ||
2541 | static void __mptcp_destroy_sock(struct sock *sk) | |
2542 | { | |
2543 | struct mptcp_subflow_context *subflow, *tmp; | |
2544 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2545 | LIST_HEAD(conn_list); | |
2546 | ||
2547 | pr_debug("msk=%p", msk); | |
f870fa0b | 2548 | |
51b25e09 FW |
2549 | might_sleep(); |
2550 | ||
10f6d46c PA |
2551 | /* be sure to always acquire the join list lock, to sync vs |
2552 | * mptcp_finish_join(). | |
2553 | */ | |
2554 | spin_lock_bh(&msk->join_list_lock); | |
2555 | list_splice_tail_init(&msk->join_list, &msk->conn_list); | |
2556 | spin_unlock_bh(&msk->join_list_lock); | |
b2c5b614 FW |
2557 | list_splice_init(&msk->conn_list, &conn_list); |
2558 | ||
6e628cd3 | 2559 | sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer); |
e16163b6 PA |
2560 | sk_stop_timer(sk, &sk->sk_timer); |
2561 | msk->pm.status = 0; | |
b2c5b614 FW |
2562 | |
2563 | list_for_each_entry_safe(subflow, tmp, &conn_list, node) { | |
cec37a6e | 2564 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
e16163b6 | 2565 | __mptcp_close_ssk(sk, ssk, subflow); |
f870fa0b MM |
2566 | } |
2567 | ||
e16163b6 | 2568 | sk->sk_prot->destroy(sk); |
80992017 | 2569 | |
e93da928 | 2570 | WARN_ON_ONCE(msk->wmem_reserved); |
87952603 | 2571 | WARN_ON_ONCE(msk->rmem_released); |
e16163b6 PA |
2572 | sk_stream_kill_queues(sk); |
2573 | xfrm_sk_free_policy(sk); | |
2574 | sk_refcnt_debug_release(sk); | |
ba30dc32 | 2575 | mptcp_dispose_initial_subflow(msk); |
e16163b6 PA |
2576 | sock_put(sk); |
2577 | } | |
2578 | ||
2579 | static void mptcp_close(struct sock *sk, long timeout) | |
2580 | { | |
2581 | struct mptcp_subflow_context *subflow; | |
2582 | bool do_cancel_work = false; | |
2583 | ||
2584 | lock_sock(sk); | |
2585 | sk->sk_shutdown = SHUTDOWN_MASK; | |
2586 | ||
2587 | if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { | |
2588 | inet_sk_state_store(sk, TCP_CLOSE); | |
2589 | goto cleanup; | |
2590 | } | |
6771bfd9 | 2591 | |
e16163b6 PA |
2592 | if (mptcp_close_state(sk)) |
2593 | __mptcp_wr_shutdown(sk); | |
2594 | ||
2595 | sk_stream_wait_close(sk, timeout); | |
2596 | ||
2597 | cleanup: | |
2598 | /* orphan all the subflows */ | |
2599 | inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32; | |
2600 | list_for_each_entry(subflow, &mptcp_sk(sk)->conn_list, node) { | |
2601 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
133f0169 | 2602 | bool slow = lock_sock_fast(ssk); |
e16163b6 | 2603 | |
e16163b6 PA |
2604 | sock_orphan(ssk); |
2605 | unlock_sock_fast(ssk, slow); | |
e16163b6 PA |
2606 | } |
2607 | sock_orphan(sk); | |
2608 | ||
2609 | sock_hold(sk); | |
2610 | pr_debug("msk=%p state=%d", sk, sk->sk_state); | |
2611 | if (sk->sk_state == TCP_CLOSE) { | |
2612 | __mptcp_destroy_sock(sk); | |
2613 | do_cancel_work = true; | |
2614 | } else { | |
2615 | sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN); | |
2616 | } | |
2617 | release_sock(sk); | |
2618 | if (do_cancel_work) | |
2619 | mptcp_cancel_work(sk); | |
2620 | sock_put(sk); | |
f870fa0b MM |
2621 | } |
2622 | ||
cf7da0d6 PK |
2623 | static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) |
2624 | { | |
2625 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
2626 | const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); | |
2627 | struct ipv6_pinfo *msk6 = inet6_sk(msk); | |
2628 | ||
2629 | msk->sk_v6_daddr = ssk->sk_v6_daddr; | |
2630 | msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; | |
2631 | ||
2632 | if (msk6 && ssk6) { | |
2633 | msk6->saddr = ssk6->saddr; | |
2634 | msk6->flow_label = ssk6->flow_label; | |
2635 | } | |
2636 | #endif | |
2637 | ||
2638 | inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; | |
2639 | inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; | |
2640 | inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; | |
2641 | inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; | |
2642 | inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; | |
2643 | inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; | |
2644 | } | |
2645 | ||
18b683bf PA |
2646 | static int mptcp_disconnect(struct sock *sk, int flags) |
2647 | { | |
76e2a55d PA |
2648 | struct mptcp_subflow_context *subflow; |
2649 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2650 | ||
2651 | __mptcp_flush_join_list(msk); | |
13a9499e PA |
2652 | mptcp_for_each_subflow(msk, subflow) { |
2653 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
2654 | ||
2655 | lock_sock(ssk); | |
2656 | tcp_disconnect(ssk, flags); | |
2657 | release_sock(ssk); | |
2658 | } | |
42c556fe | 2659 | return 0; |
18b683bf PA |
2660 | } |
2661 | ||
b0519de8 FW |
2662 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
2663 | static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) | |
2664 | { | |
2665 | unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); | |
2666 | ||
2667 | return (struct ipv6_pinfo *)(((u8 *)sk) + offset); | |
2668 | } | |
2669 | #endif | |
2670 | ||
fca5c82c | 2671 | struct sock *mptcp_sk_clone(const struct sock *sk, |
cfde141e | 2672 | const struct mptcp_options_received *mp_opt, |
fca5c82c | 2673 | struct request_sock *req) |
b0519de8 | 2674 | { |
58b09919 | 2675 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); |
b0519de8 | 2676 | struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); |
58b09919 PA |
2677 | struct mptcp_sock *msk; |
2678 | u64 ack_seq; | |
b0519de8 FW |
2679 | |
2680 | if (!nsk) | |
2681 | return NULL; | |
2682 | ||
2683 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
2684 | if (nsk->sk_family == AF_INET6) | |
2685 | inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); | |
2686 | #endif | |
2687 | ||
58b09919 PA |
2688 | __mptcp_init_sock(nsk); |
2689 | ||
2690 | msk = mptcp_sk(nsk); | |
2691 | msk->local_key = subflow_req->local_key; | |
2692 | msk->token = subflow_req->token; | |
2693 | msk->subflow = NULL; | |
b93df08c | 2694 | WRITE_ONCE(msk->fully_established, false); |
58b09919 | 2695 | |
58b09919 | 2696 | msk->write_seq = subflow_req->idsn + 1; |
eaa2ffab | 2697 | msk->snd_nxt = msk->write_seq; |
7439d687 PA |
2698 | msk->snd_una = msk->write_seq; |
2699 | msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd; | |
6f8a612a | 2700 | |
cfde141e | 2701 | if (mp_opt->mp_capable) { |
58b09919 | 2702 | msk->can_ack = true; |
cfde141e | 2703 | msk->remote_key = mp_opt->sndr_key; |
58b09919 PA |
2704 | mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); |
2705 | ack_seq++; | |
917944da | 2706 | WRITE_ONCE(msk->ack_seq, ack_seq); |
fa3fe2b1 | 2707 | WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); |
58b09919 | 2708 | } |
7f20d5fc | 2709 | |
5e20087d | 2710 | sock_reset_flag(nsk, SOCK_RCU_FREE); |
7f20d5fc PA |
2711 | /* will be fully established after successful MPC subflow creation */ |
2712 | inet_sk_state_store(nsk, TCP_SYN_RECV); | |
0c148460 PA |
2713 | |
2714 | security_inet_csk_clone(nsk, req); | |
58b09919 PA |
2715 | bh_unlock_sock(nsk); |
2716 | ||
2717 | /* keep a single reference */ | |
2718 | __sock_put(nsk); | |
b0519de8 FW |
2719 | return nsk; |
2720 | } | |
2721 | ||
a6b118fe FW |
2722 | void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) |
2723 | { | |
2724 | const struct tcp_sock *tp = tcp_sk(ssk); | |
2725 | ||
2726 | msk->rcvq_space.copied = 0; | |
2727 | msk->rcvq_space.rtt_us = 0; | |
2728 | ||
2729 | msk->rcvq_space.time = tp->tcp_mstamp; | |
2730 | ||
2731 | /* initial rcv_space offering made to peer */ | |
2732 | msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, | |
2733 | TCP_INIT_CWND * tp->advmss); | |
2734 | if (msk->rcvq_space.space == 0) | |
2735 | msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; | |
6f8a612a | 2736 | |
7439d687 | 2737 | WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); |
a6b118fe FW |
2738 | } |
2739 | ||
cf7da0d6 PK |
2740 | static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, |
2741 | bool kern) | |
2742 | { | |
2743 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2744 | struct socket *listener; | |
2745 | struct sock *newsk; | |
2746 | ||
2747 | listener = __mptcp_nmpc_socket(msk); | |
2748 | if (WARN_ON_ONCE(!listener)) { | |
2749 | *err = -EINVAL; | |
2750 | return NULL; | |
2751 | } | |
2752 | ||
2753 | pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); | |
2754 | newsk = inet_csk_accept(listener->sk, flags, err, kern); | |
2755 | if (!newsk) | |
2756 | return NULL; | |
2757 | ||
2758 | pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); | |
cf7da0d6 PK |
2759 | if (sk_is_mptcp(newsk)) { |
2760 | struct mptcp_subflow_context *subflow; | |
2761 | struct sock *new_mptcp_sock; | |
cf7da0d6 PK |
2762 | |
2763 | subflow = mptcp_subflow_ctx(newsk); | |
58b09919 | 2764 | new_mptcp_sock = subflow->conn; |
cf7da0d6 | 2765 | |
58b09919 PA |
2766 | /* is_mptcp should be false if subflow->conn is missing, see |
2767 | * subflow_syn_recv_sock() | |
2768 | */ | |
2769 | if (WARN_ON_ONCE(!new_mptcp_sock)) { | |
2770 | tcp_sk(newsk)->is_mptcp = 0; | |
2771 | return newsk; | |
cf7da0d6 PK |
2772 | } |
2773 | ||
58b09919 PA |
2774 | /* acquire the 2nd reference for the owning socket */ |
2775 | sock_hold(new_mptcp_sock); | |
cf7da0d6 | 2776 | newsk = new_mptcp_sock; |
0397c6d8 | 2777 | MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); |
fc518953 FW |
2778 | } else { |
2779 | MPTCP_INC_STATS(sock_net(sk), | |
2780 | MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); | |
cf7da0d6 PK |
2781 | } |
2782 | ||
2783 | return newsk; | |
2784 | } | |
2785 | ||
5c8c1640 GT |
2786 | void mptcp_destroy_common(struct mptcp_sock *msk) |
2787 | { | |
87952603 PA |
2788 | struct sock *sk = (struct sock *)msk; |
2789 | ||
6e628cd3 PA |
2790 | __mptcp_clear_xmit(sk); |
2791 | ||
87952603 PA |
2792 | /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ |
2793 | skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); | |
2794 | ||
5c8c1640 GT |
2795 | skb_rbtree_purge(&msk->out_of_order_queue); |
2796 | mptcp_token_destroy(msk); | |
2797 | mptcp_pm_free_anno_list(msk); | |
2798 | } | |
2799 | ||
79c0949e PK |
2800 | static void mptcp_destroy(struct sock *sk) |
2801 | { | |
c9fd9c5f FW |
2802 | struct mptcp_sock *msk = mptcp_sk(sk); |
2803 | ||
5c8c1640 | 2804 | mptcp_destroy_common(msk); |
d027236c | 2805 | sk_sockets_allocated_dec(sk); |
79c0949e PK |
2806 | } |
2807 | ||
fd1452d8 | 2808 | static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, |
a7b75c5a | 2809 | sockptr_t optval, unsigned int optlen) |
fd1452d8 FW |
2810 | { |
2811 | struct sock *sk = (struct sock *)msk; | |
2812 | struct socket *ssock; | |
2813 | int ret; | |
2814 | ||
2815 | switch (optname) { | |
2816 | case SO_REUSEPORT: | |
2817 | case SO_REUSEADDR: | |
2818 | lock_sock(sk); | |
2819 | ssock = __mptcp_nmpc_socket(msk); | |
2820 | if (!ssock) { | |
2821 | release_sock(sk); | |
2822 | return -EINVAL; | |
2823 | } | |
2824 | ||
a7b75c5a | 2825 | ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
2826 | if (ret == 0) { |
2827 | if (optname == SO_REUSEPORT) | |
2828 | sk->sk_reuseport = ssock->sk->sk_reuseport; | |
2829 | else if (optname == SO_REUSEADDR) | |
2830 | sk->sk_reuse = ssock->sk->sk_reuse; | |
2831 | } | |
2832 | release_sock(sk); | |
2833 | return ret; | |
2834 | } | |
2835 | ||
a7b75c5a | 2836 | return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
2837 | } |
2838 | ||
c9b95a13 | 2839 | static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, |
a7b75c5a | 2840 | sockptr_t optval, unsigned int optlen) |
c9b95a13 FW |
2841 | { |
2842 | struct sock *sk = (struct sock *)msk; | |
2843 | int ret = -EOPNOTSUPP; | |
2844 | struct socket *ssock; | |
2845 | ||
2846 | switch (optname) { | |
2847 | case IPV6_V6ONLY: | |
2848 | lock_sock(sk); | |
2849 | ssock = __mptcp_nmpc_socket(msk); | |
2850 | if (!ssock) { | |
2851 | release_sock(sk); | |
2852 | return -EINVAL; | |
2853 | } | |
2854 | ||
2855 | ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); | |
2856 | if (ret == 0) | |
2857 | sk->sk_ipv6only = ssock->sk->sk_ipv6only; | |
2858 | ||
2859 | release_sock(sk); | |
2860 | break; | |
2861 | } | |
2862 | ||
2863 | return ret; | |
2864 | } | |
2865 | ||
717e79c8 | 2866 | static int mptcp_setsockopt(struct sock *sk, int level, int optname, |
a7b75c5a | 2867 | sockptr_t optval, unsigned int optlen) |
717e79c8 PK |
2868 | { |
2869 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 2870 | struct sock *ssk; |
717e79c8 PK |
2871 | |
2872 | pr_debug("msk=%p", msk); | |
2873 | ||
83f0c10b | 2874 | if (level == SOL_SOCKET) |
fd1452d8 | 2875 | return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); |
83f0c10b | 2876 | |
717e79c8 | 2877 | /* @@ the meaning of setsockopt() when the socket is connected and |
b6e4a1ae MM |
2878 | * there are multiple subflows is not yet defined. It is up to the |
2879 | * MPTCP-level socket to configure the subflows until the subflow | |
2880 | * is in TCP fallback, when TCP socket options are passed through | |
2881 | * to the one remaining subflow. | |
717e79c8 PK |
2882 | */ |
2883 | lock_sock(sk); | |
76660afb | 2884 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 2885 | release_sock(sk); |
76660afb PA |
2886 | if (ssk) |
2887 | return tcp_setsockopt(ssk, level, optname, optval, optlen); | |
50e741bb | 2888 | |
c9b95a13 FW |
2889 | if (level == SOL_IPV6) |
2890 | return mptcp_setsockopt_v6(msk, optname, optval, optlen); | |
2891 | ||
b6e4a1ae | 2892 | return -EOPNOTSUPP; |
717e79c8 PK |
2893 | } |
2894 | ||
2895 | static int mptcp_getsockopt(struct sock *sk, int level, int optname, | |
50e741bb | 2896 | char __user *optval, int __user *option) |
717e79c8 PK |
2897 | { |
2898 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 2899 | struct sock *ssk; |
717e79c8 PK |
2900 | |
2901 | pr_debug("msk=%p", msk); | |
2902 | ||
b6e4a1ae MM |
2903 | /* @@ the meaning of getsockopt() when the socket is connected and | |
2904 | * there are multiple subflows is not yet defined. It is up to the | |
2905 | * MPTCP-level socket to configure the subflows until the subflow | |
2906 | * is in TCP fallback, when socket options are passed through | |
2907 | * to the one remaining subflow. | |
717e79c8 PK |
2908 | */ |
2909 | lock_sock(sk); | |
76660afb | 2910 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 2911 | release_sock(sk); |
76660afb PA |
2912 | if (ssk) |
2913 | return tcp_getsockopt(ssk, level, optname, optval, option); | |
50e741bb | 2914 | |
b6e4a1ae | 2915 | return -EOPNOTSUPP; |
717e79c8 PK |
2916 | } |
2917 | ||
6e628cd3 PA |
2918 | void __mptcp_data_acked(struct sock *sk) |
2919 | { | |
2920 | if (!sock_owned_by_user(sk)) | |
2921 | __mptcp_clean_una(sk); | |
2922 | else | |
2923 | set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags); | |
2924 | ||
2925 | if (mptcp_pending_data_fin_ack(sk)) | |
2926 | mptcp_schedule_work(sk); | |
2927 | } | |
2928 | ||
219d0499 | 2929 | void __mptcp_check_push(struct sock *sk, struct sock *ssk) |
6e628cd3 PA |
2930 | { |
2931 | if (!mptcp_send_head(sk)) | |
2932 | return; | |
2933 | ||
2934 | if (!sock_owned_by_user(sk)) | |
2935 | __mptcp_subflow_push_pending(sk, ssk); | |
2936 | else | |
2937 | set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); | |
2938 | } | |
2939 | ||
ea4ca586 | 2940 | #define MPTCP_DEFERRED_ALL (TCPF_WRITE_TIMER_DEFERRED) |
14c441b5 | 2941 | |
e93da928 | 2942 | /* process deferred events and flush wmem */ |
14c441b5 PA |
2943 | static void mptcp_release_cb(struct sock *sk) |
2944 | { | |
2945 | unsigned long flags, nflags; | |
2946 | ||
3f8f491c PA |
2947 | for (;;) { |
2948 | flags = 0; | |
2949 | if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) | |
2950 | flags |= MPTCP_PUSH_PENDING; | |
2951 | if (!flags) | |
2952 | break; | |
2953 | ||
2954 | /* the following actions acquire the subflow socket lock | |
6e628cd3 PA |
2955 | * |
2956 | * 1) can't be invoked in atomic scope | |
2957 | * 2) must avoid ABBA deadlock with msk socket spinlock: the RX | |
2958 | * datapath acquires the msk socket spinlock while holding | |
2959 | * the subflow socket lock | |
2960 | */ | |
2961 | ||
2962 | spin_unlock_bh(&sk->sk_lock.slock); | |
3f8f491c PA |
2963 | if (flags & MPTCP_PUSH_PENDING) |
2964 | __mptcp_push_pending(sk, 0); | |
2965 | ||
2966 | cond_resched(); | |
6e628cd3 PA |
2967 | spin_lock_bh(&sk->sk_lock.slock); |
2968 | } | |
3f8f491c PA |
2969 | |
2970 | if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags)) | |
2971 | __mptcp_clean_una(sk); | |
a5b31c47 PA |
2972 | if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags)) |
2973 | __mptcp_error_report(sk); | |
6e628cd3 | 2974 | |
3f8f491c PA |
2975 | /* push_pending may touch wmem_reserved, ensure we do the cleanup |
2976 | * later | |
2977 | */ | |
e93da928 | 2978 | __mptcp_update_wmem(sk); |
87952603 | 2979 | __mptcp_update_rmem(sk); |
e93da928 | 2980 | |
14c441b5 PA |
2981 | do { |
2982 | flags = sk->sk_tsq_flags; | |
2983 | if (!(flags & MPTCP_DEFERRED_ALL)) | |
2984 | return; | |
2985 | nflags = flags & ~MPTCP_DEFERRED_ALL; | |
2986 | } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); | |
2987 | ||
b51f9b80 PA |
2988 | sock_release_ownership(sk); |
2989 | ||
b51f9b80 PA |
2990 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { |
2991 | mptcp_retransmit_handler(sk); | |
2992 | __sock_put(sk); | |
2993 | } | |
14c441b5 PA |
2994 | } |
2995 | ||
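The tail of mptcp_release_cb() above uses a claim-the-deferred-work pattern: read sk_tsq_flags, clear the deferred bits, and retry via cmpxchg if the timer set new bits in the meantime, so no event is lost. Below is a userspace sketch of the same pattern with C11 atomics; the flag name and value are illustrative stand-ins, not the kernel's TCPF_* constants.

#include <stdatomic.h>
#include <stdio.h>

#define WRITE_TIMER_DEFERRED	0x1	/* stand-in for TCPF_WRITE_TIMER_DEFERRED */
#define DEFERRED_ALL		(WRITE_TIMER_DEFERRED)

static _Atomic unsigned long tsq_flags;

/* Atomically fetch-and-clear the deferred bits; returns the bits claimed. */
static unsigned long claim_deferred(void)
{
	unsigned long flags = atomic_load(&tsq_flags);
	unsigned long nflags;

	do {
		if (!(flags & DEFERRED_ALL))
			return 0;
		nflags = flags & ~DEFERRED_ALL;
		/* on failure 'flags' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(&tsq_flags, &flags, nflags));

	return flags & DEFERRED_ALL;
}

int main(void)
{
	atomic_store(&tsq_flags, WRITE_TIMER_DEFERRED);
	printf("claimed 0x%lx\n", claim_deferred());
	return 0;
}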
2c5ebd00 PA |
2996 | static int mptcp_hash(struct sock *sk) |
2997 | { | |
2998 | /* should never be called, | |
2999 | * we hash the TCP subflows not the master socket | |
3000 | */ | |
3001 | WARN_ON_ONCE(1); | |
3002 | return 0; | |
3003 | } | |
3004 | ||
3005 | static void mptcp_unhash(struct sock *sk) | |
3006 | { | |
3007 | /* called from sk_common_release(), but nothing to do here */ | |
3008 | } | |
3009 | ||
cec37a6e | 3010 | static int mptcp_get_port(struct sock *sk, unsigned short snum) |
f870fa0b MM |
3011 | { |
3012 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 3013 | struct socket *ssock; |
f870fa0b | 3014 | |
cec37a6e PK |
3015 | ssock = __mptcp_nmpc_socket(msk); |
3016 | pr_debug("msk=%p, subflow=%p", msk, ssock); | |
3017 | if (WARN_ON_ONCE(!ssock)) | |
3018 | return -EINVAL; | |
f870fa0b | 3019 | |
cec37a6e PK |
3020 | return inet_csk_get_port(ssock->sk, snum); |
3021 | } | |
f870fa0b | 3022 | |
cec37a6e PK |
3023 | void mptcp_finish_connect(struct sock *ssk) |
3024 | { | |
3025 | struct mptcp_subflow_context *subflow; | |
3026 | struct mptcp_sock *msk; | |
3027 | struct sock *sk; | |
6d0060f6 | 3028 | u64 ack_seq; |
f870fa0b | 3029 | |
cec37a6e | 3030 | subflow = mptcp_subflow_ctx(ssk); |
cec37a6e PK |
3031 | sk = subflow->conn; |
3032 | msk = mptcp_sk(sk); | |
3033 | ||
648ef4b8 MM |
3034 | pr_debug("msk=%p, token=%u", sk, subflow->token); |
3035 | ||
6d0060f6 MM |
3036 | mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); |
3037 | ack_seq++; | |
648ef4b8 MM |
3038 | subflow->map_seq = ack_seq; |
3039 | subflow->map_subflow_seq = 1; | |
6d0060f6 | 3040 | |
cec37a6e PK |
3041 | /* the socket is not connected yet, no msk/subflow ops can access or | |
3042 | * race on the fields below | |
3043 | */ | |
3044 | WRITE_ONCE(msk->remote_key, subflow->remote_key); | |
3045 | WRITE_ONCE(msk->local_key, subflow->local_key); | |
6d0060f6 | 3046 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); |
eaa2ffab | 3047 | WRITE_ONCE(msk->snd_nxt, msk->write_seq); |
6d0060f6 | 3048 | WRITE_ONCE(msk->ack_seq, ack_seq); |
fa3fe2b1 | 3049 | WRITE_ONCE(msk->rcv_wnd_sent, ack_seq); |
d22f4988 | 3050 | WRITE_ONCE(msk->can_ack, 1); |
7439d687 | 3051 | WRITE_ONCE(msk->snd_una, msk->write_seq); |
1b1c7a0e PK |
3052 | |
3053 | mptcp_pm_new_connection(msk, 0); | |
a6b118fe FW |
3054 | |
3055 | mptcp_rcv_space_init(msk, ssk); | |
f870fa0b MM |
3056 | } |
3057 | ||
133f0169 | 3058 | void mptcp_sock_graft(struct sock *sk, struct socket *parent) |
cf7da0d6 PK |
3059 | { |
3060 | write_lock_bh(&sk->sk_callback_lock); | |
3061 | rcu_assign_pointer(sk->sk_wq, &parent->wq); | |
3062 | sk_set_socket(sk, parent); | |
3063 | sk->sk_uid = SOCK_INODE(parent)->i_uid; | |
3064 | write_unlock_bh(&sk->sk_callback_lock); | |
3065 | } | |
3066 | ||
e16163b6 | 3067 | bool mptcp_finish_join(struct sock *ssk) |
f296234c | 3068 | { |
e16163b6 | 3069 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
f296234c PK |
3070 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); |
3071 | struct sock *parent = (void *)msk; | |
3072 | struct socket *parent_sock; | |
ec3edaa7 | 3073 | bool ret; |
f296234c PK |
3074 | |
3075 | pr_debug("msk=%p, subflow=%p", msk, subflow); | |
3076 | ||
3077 | /* mptcp socket already closing? */ | |
b93df08c | 3078 | if (!mptcp_is_fully_established(parent)) |
f296234c PK |
3079 | return false; |
3080 | ||
3081 | if (!msk->pm.server_side) | |
3082 | return true; | |
3083 | ||
10f6d46c PA |
3084 | if (!mptcp_pm_allow_new_subflow(msk)) |
3085 | return false; | |
3086 | ||
3087 | /* active connections are already on conn_list, and we can't acquire | |
3088 | * msk lock here. | |
3089 | * Use the join list lock as a synchronization point and double-check
e16163b6 | 3090 | * msk status to avoid racing with __mptcp_destroy_sock() |
10f6d46c PA |
3091 | */ |
3092 | spin_lock_bh(&msk->join_list_lock); | |
3093 | ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; | |
e16163b6 | 3094 | if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) { |
10f6d46c | 3095 | list_add_tail(&subflow->node, &msk->join_list); |
e16163b6 PA |
3096 | sock_hold(ssk); |
3097 | } | |
10f6d46c PA |
3098 | spin_unlock_bh(&msk->join_list_lock); |
3099 | if (!ret) | |
3100 | return false; | |
3101 | ||
3102 | /* attach to the msk socket only after we are sure it will deal with us
3103 | * at close time | |
3104 | */ | |
f296234c | 3105 | parent_sock = READ_ONCE(parent->sk_socket); |
e16163b6 PA |
3106 | if (parent_sock && !ssk->sk_socket) |
3107 | mptcp_sock_graft(ssk, parent_sock); | |
917944da | 3108 | subflow->map_seq = READ_ONCE(msk->ack_seq); |
10f6d46c | 3109 | return true; |
f296234c PK |
3110 | } |
3111 | ||
76e2a55d PA |
3112 | static void mptcp_shutdown(struct sock *sk, int how) |
3113 | { | |
3114 | pr_debug("sk=%p, how=%d", sk, how); | |
3115 | ||
3116 | if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) | |
3117 | __mptcp_wr_shutdown(sk); | |
3118 | } | |
3119 | ||
f870fa0b MM |
3120 | static struct proto mptcp_prot = { |
3121 | .name = "MPTCP", | |
3122 | .owner = THIS_MODULE, | |
3123 | .init = mptcp_init_sock, | |
18b683bf | 3124 | .disconnect = mptcp_disconnect, |
f870fa0b | 3125 | .close = mptcp_close, |
cf7da0d6 | 3126 | .accept = mptcp_accept, |
717e79c8 PK |
3127 | .setsockopt = mptcp_setsockopt, |
3128 | .getsockopt = mptcp_getsockopt, | |
76e2a55d | 3129 | .shutdown = mptcp_shutdown, |
79c0949e | 3130 | .destroy = mptcp_destroy, |
f870fa0b MM |
3131 | .sendmsg = mptcp_sendmsg, |
3132 | .recvmsg = mptcp_recvmsg, | |
14c441b5 | 3133 | .release_cb = mptcp_release_cb, |
2c5ebd00 PA |
3134 | .hash = mptcp_hash, |
3135 | .unhash = mptcp_unhash, | |
cec37a6e | 3136 | .get_port = mptcp_get_port, |
d027236c PA |
3137 | .sockets_allocated = &mptcp_sockets_allocated, |
3138 | .memory_allocated = &tcp_memory_allocated, | |
3139 | .memory_pressure = &tcp_memory_pressure, | |
d027236c | 3140 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), |
989ef49b | 3141 | .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), |
d027236c | 3142 | .sysctl_mem = sysctl_tcp_mem, |
f870fa0b | 3143 | .obj_size = sizeof(struct mptcp_sock), |
2c5ebd00 | 3144 | .slab_flags = SLAB_TYPESAFE_BY_RCU, |
f870fa0b MM |
3145 | .no_autobind = true, |
3146 | }; | |
3147 | ||
2303f994 PK |
3148 | static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
3149 | { | |
3150 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
3151 | struct socket *ssock; | |
cf7da0d6 | 3152 | int err; |
2303f994 PK |
3153 | |
3154 | lock_sock(sock->sk); | |
fa68018d PA |
3155 | ssock = __mptcp_nmpc_socket(msk); |
3156 | if (!ssock) { | |
3157 | err = -EINVAL; | |
2303f994 PK |
3158 | goto unlock; |
3159 | } | |
3160 | ||
3161 | err = ssock->ops->bind(ssock, uaddr, addr_len); | |
cf7da0d6 PK |
3162 | if (!err) |
3163 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2303f994 PK |
3164 | |
3165 | unlock: | |
3166 | release_sock(sock->sk); | |
3167 | return err; | |
3168 | } | |
3169 | ||
0235d075 PA |
3170 | static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, |
3171 | struct mptcp_subflow_context *subflow) | |
3172 | { | |
3173 | subflow->request_mptcp = 0; | |
3174 | __mptcp_do_fallback(msk); | |
3175 | } | |
3176 | ||
2303f994 PK |
3177 | static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
3178 | int addr_len, int flags) | |
3179 | { | |
3180 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2c5ebd00 | 3181 | struct mptcp_subflow_context *subflow; |
2303f994 PK |
3182 | struct socket *ssock; |
3183 | int err; | |
3184 | ||
3185 | lock_sock(sock->sk); | |
41be81a8 PA |
3186 | if (sock->state != SS_UNCONNECTED && msk->subflow) { |
3187 | /* pending connection or invalid state; let the existing subflow
3188 | * cope with that
3189 | */ | |
3190 | ssock = msk->subflow; | |
3191 | goto do_connect; | |
3192 | } | |
3193 | ||
fa68018d PA |
3194 | ssock = __mptcp_nmpc_socket(msk); |
3195 | if (!ssock) { | |
3196 | err = -EINVAL; | |
2303f994 PK |
3197 | goto unlock; |
3198 | } | |
3199 | ||
fa68018d PA |
3200 | mptcp_token_destroy(msk); |
3201 | inet_sk_state_store(sock->sk, TCP_SYN_SENT); | |
2c5ebd00 | 3202 | subflow = mptcp_subflow_ctx(ssock->sk); |
cf7da0d6 PK |
3203 | #ifdef CONFIG_TCP_MD5SIG |
3204 | /* no MPTCP if MD5SIG is enabled on this socket, or we may run out of
3205 | * TCP option space. | |
3206 | */ | |
3207 | if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) | |
0235d075 | 3208 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 3209 | #endif |
2c5ebd00 | 3210 | if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) |
0235d075 | 3211 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 3212 | |
41be81a8 | 3213 | do_connect: |
2303f994 | 3214 | err = ssock->ops->connect(ssock, uaddr, addr_len, flags); |
41be81a8 PA |
3215 | sock->state = ssock->state; |
3216 | ||
3217 | /* on successful connect, the msk state will be moved to established by | |
3218 | * subflow_finish_connect() | |
3219 | */ | |
367fe04e | 3220 | if (!err || err == -EINPROGRESS) |
41be81a8 PA |
3221 | mptcp_copy_inaddrs(sock->sk, ssock->sk); |
3222 | else | |
3223 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2303f994 PK |
3224 | |
3225 | unlock: | |
3226 | release_sock(sock->sk); | |
3227 | return err; | |
3228 | } | |
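From user space this connect path is reached through an ordinary stream socket created with IPPROTO_MPTCP; on kernels without MPTCP support the socket() call fails and an application can fall back to plain TCP. A minimal client sketch follows, with the address and port as placeholders.

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262			/* value from the uapi headers */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),		/* placeholder port */
	};
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr);	/* placeholder address */

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (fd < 0) {
		perror("socket(IPPROTO_MPTCP)");	/* e.g. kernel built without MPTCP */
		return 1;
	}
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");			/* handled by mptcp_stream_connect() */
	close(fd);
	return 0;
}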
3229 | ||
cf7da0d6 PK |
3230 | static int mptcp_listen(struct socket *sock, int backlog) |
3231 | { | |
3232 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
3233 | struct socket *ssock; | |
3234 | int err; | |
3235 | ||
3236 | pr_debug("msk=%p", msk); | |
3237 | ||
3238 | lock_sock(sock->sk); | |
fa68018d PA |
3239 | ssock = __mptcp_nmpc_socket(msk); |
3240 | if (!ssock) { | |
3241 | err = -EINVAL; | |
cf7da0d6 PK |
3242 | goto unlock; |
3243 | } | |
3244 | ||
fa68018d PA |
3245 | mptcp_token_destroy(msk); |
3246 | inet_sk_state_store(sock->sk, TCP_LISTEN); | |
5e20087d FW |
3247 | sock_set_flag(sock->sk, SOCK_RCU_FREE); |
3248 | ||
cf7da0d6 PK |
3249 | err = ssock->ops->listen(ssock, backlog); |
3250 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
3251 | if (!err) | |
3252 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
3253 | ||
3254 | unlock: | |
3255 | release_sock(sock->sk); | |
3256 | return err; | |
3257 | } | |
3258 | ||
cf7da0d6 PK |
3259 | static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, |
3260 | int flags, bool kern) | |
3261 | { | |
3262 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
3263 | struct socket *ssock; | |
3264 | int err; | |
3265 | ||
3266 | pr_debug("msk=%p", msk); | |
3267 | ||
3268 | lock_sock(sock->sk); | |
3269 | if (sock->sk->sk_state != TCP_LISTEN) | |
3270 | goto unlock_fail; | |
3271 | ||
3272 | ssock = __mptcp_nmpc_socket(msk); | |
3273 | if (!ssock) | |
3274 | goto unlock_fail; | |
3275 | ||
8a05661b | 3276 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cf7da0d6 PK |
3277 | sock_hold(ssock->sk); |
3278 | release_sock(sock->sk); | |
3279 | ||
3280 | err = ssock->ops->accept(sock, newsock, flags, kern); | |
d2f77c53 | 3281 | if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { |
cf7da0d6 PK |
3282 | struct mptcp_sock *msk = mptcp_sk(newsock->sk); |
3283 | struct mptcp_subflow_context *subflow; | |
0397c6d8 PA |
3284 | struct sock *newsk = newsock->sk; |
3285 | bool slowpath; | |
3286 | ||
3287 | slowpath = lock_sock_fast(newsk); | |
5b950ff4 PA |
3288 | |
3289 | /* PM/worker can now acquire the first subflow socket | |
3290 | * lock without racing with the listener queue cleanup;
3291 | * we can notify it, if needed.
3292 | */ | |
3293 | subflow = mptcp_subflow_ctx(msk->first); | |
3294 | list_add(&subflow->node, &msk->conn_list); | |
3295 | sock_hold(msk->first); | |
3296 | if (mptcp_is_fully_established(newsk)) | |
3297 | mptcp_pm_fully_established(msk); | |
3298 | ||
0397c6d8 PA |
3299 | mptcp_copy_inaddrs(newsk, msk->first); |
3300 | mptcp_rcv_space_init(msk, msk->first); | |
cf7da0d6 PK |
3301 | |
3302 | /* set ssk->sk_socket of accept()ed flows to the mptcp socket.
3303 | * This is needed so the NOSPACE flag can be set from the tcp stack.
3304 | */ | |
ec3edaa7 | 3305 | __mptcp_flush_join_list(msk); |
190f8b06 | 3306 | mptcp_for_each_subflow(msk, subflow) { |
cf7da0d6 PK |
3307 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
3308 | ||
3309 | if (!ssk->sk_socket) | |
3310 | mptcp_sock_graft(ssk, newsock); | |
3311 | } | |
0397c6d8 | 3312 | unlock_sock_fast(newsk, slowpath); |
cf7da0d6 PK |
3313 | } |
3314 | ||
8a05661b PA |
3315 | if (inet_csk_listen_poll(ssock->sk)) |
3316 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
cf7da0d6 PK |
3317 | sock_put(ssock->sk); |
3318 | return err; | |
3319 | ||
3320 | unlock_fail: | |
3321 | release_sock(sock->sk); | |
3322 | return -EINVAL; | |
3323 | } | |
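The listening side is symmetric: bind(), listen() and accept() on an IPPROTO_MPTCP socket end up in mptcp_bind(), mptcp_listen() and mptcp_stream_accept() above, and the accepted socket is again an MPTCP one unless the peer turned out to be plain TCP. A minimal server sketch, with the port as a placeholder:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262			/* value from the uapi headers */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(8080),		/* placeholder port */
	};
	int lfd, cfd;

	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	if (lfd < 0 ||
	    bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 8) < 0) {
		perror("mptcp listener setup");
		return 1;
	}
	cfd = accept(lfd, NULL, NULL);		/* mptcp_stream_accept() path */
	if (cfd >= 0)
		close(cfd);
	close(lfd);
	return 0;
}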
3324 | ||
8a05661b PA |
3325 | static __poll_t mptcp_check_readable(struct mptcp_sock *msk) |
3326 | { | |
3327 | return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : | |
3328 | 0; | |
3329 | } | |
3330 | ||
8edf0864 FW |
3331 | static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) |
3332 | { | |
3333 | struct sock *sk = (struct sock *)msk; | |
8edf0864 FW |
3334 | |
3335 | if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) | |
32c0f44e | 3336 | return EPOLLOUT | EPOLLWRNORM; |
8edf0864 FW |
3337 | |
3338 | if (sk_stream_is_writeable(sk)) | |
3339 | return EPOLLOUT | EPOLLWRNORM; | |
3340 | ||
6e628cd3 PA |
3341 | set_bit(MPTCP_NOSPACE, &msk->flags); |
3342 | smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */ | |
3343 | if (sk_stream_is_writeable(sk)) | |
3344 | return EPOLLOUT | EPOLLWRNORM; | |
8edf0864 | 3345 | |
6e628cd3 | 3346 | return 0; |
8edf0864 FW |
3347 | } |
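mptcp_check_writeable() uses a publish-then-recheck sequence: set MPTCP_NOSPACE, issue a full barrier, then test writeability again, so a write-space wakeup racing with the poller cannot be lost. Below is a hedged user-space analogue of that pattern using C11 sequentially consistent atomics; the names are illustrative only, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool nospace_hint;	/* analogue of MPTCP_NOSPACE */
static atomic_int  sndbuf_room = 1;	/* analogue of stream writeability */

/* Poller side: publish the hint, then re-check. Seq_cst ordering here plays
 * the role of smp_mb__after_atomic(): a producer freeing space after the
 * first check either sees the hint (and wakes us) or we see the space.
 */
static bool check_writeable(void)
{
	if (atomic_load(&sndbuf_room) > 0)
		return true;

	atomic_store(&nospace_hint, true);
	return atomic_load(&sndbuf_room) > 0;
}

/* Producer side (write-space callback analogue): free space first, then
 * test-and-clear the hint and wake the poller if it was set.
 */
static void on_space_freed(void)
{
	atomic_fetch_add(&sndbuf_room, 1);
	if (atomic_exchange(&nospace_hint, false))
		printf("wake up poller\n");
}

int main(void)
{
	printf("writeable=%d\n", check_writeable());
	on_space_freed();
	return 0;
}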
3348 | ||
2303f994 PK |
3349 | static __poll_t mptcp_poll(struct file *file, struct socket *sock, |
3350 | struct poll_table_struct *wait) | |
3351 | { | |
1891c4a0 | 3352 | struct sock *sk = sock->sk; |
8ab183de | 3353 | struct mptcp_sock *msk; |
2303f994 | 3354 | __poll_t mask = 0; |
8a05661b | 3355 | int state; |
2303f994 | 3356 | |
1891c4a0 | 3357 | msk = mptcp_sk(sk); |
1891c4a0 | 3358 | sock_poll_wait(file, sock, wait); |
1891c4a0 | 3359 | |
8a05661b | 3360 | state = inet_sk_state_load(sk); |
6719331c | 3361 | pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); |
8a05661b PA |
3362 | if (state == TCP_LISTEN) |
3363 | return mptcp_check_readable(msk); | |
3364 | ||
3365 | if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { | |
3366 | mask |= mptcp_check_readable(msk); | |
8edf0864 | 3367 | mask |= mptcp_check_writeable(msk); |
8a05661b | 3368 | } |
32c0f44e PA |
3369 | if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) |
3370 | mask |= EPOLLHUP; | |
1891c4a0 FW |
3371 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
3372 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | |
3373 | ||
a5b31c47 PA |
3374 | /* This barrier is coupled with smp_wmb() in tcp_reset() */ |
3375 | smp_rmb(); | |
3376 | if (sk->sk_err) | |
3377 | mask |= EPOLLERR; | |
3378 | ||
2303f994 PK |
3379 | return mask; |
3380 | } | |
3381 | ||
e4fae864 FW |
3382 | static int mptcp_release(struct socket *sock) |
3383 | { | |
3384 | struct mptcp_subflow_context *subflow; | |
3385 | struct sock *sk = sock->sk; | |
3386 | struct mptcp_sock *msk; | |
3387 | ||
3388 | if (!sk) | |
3389 | return 0; | |
3390 | ||
3391 | lock_sock(sk); | |
3392 | ||
3393 | msk = mptcp_sk(sk); | |
3394 | ||
3395 | mptcp_for_each_subflow(msk, subflow) { | |
3396 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
3397 | ||
3398 | ip_mc_drop_socket(ssk); | |
3399 | } | |
3400 | ||
3401 | release_sock(sk); | |
3402 | ||
3403 | return inet_release(sock); | |
3404 | } | |
3405 | ||
e42f1ac6 FW |
3406 | static const struct proto_ops mptcp_stream_ops = { |
3407 | .family = PF_INET, | |
3408 | .owner = THIS_MODULE, | |
e4fae864 | 3409 | .release = mptcp_release, |
e42f1ac6 FW |
3410 | .bind = mptcp_bind, |
3411 | .connect = mptcp_stream_connect, | |
3412 | .socketpair = sock_no_socketpair, | |
3413 | .accept = mptcp_stream_accept, | |
d2f77c53 | 3414 | .getname = inet_getname, |
e42f1ac6 FW |
3415 | .poll = mptcp_poll, |
3416 | .ioctl = inet_ioctl, | |
3417 | .gettstamp = sock_gettstamp, | |
3418 | .listen = mptcp_listen, | |
76e2a55d | 3419 | .shutdown = inet_shutdown, |
e42f1ac6 FW |
3420 | .setsockopt = sock_common_setsockopt, |
3421 | .getsockopt = sock_common_getsockopt, | |
3422 | .sendmsg = inet_sendmsg, | |
3423 | .recvmsg = inet_recvmsg, | |
3424 | .mmap = sock_no_mmap, | |
3425 | .sendpage = inet_sendpage, | |
e42f1ac6 | 3426 | }; |
2303f994 | 3427 | |
f870fa0b MM |
3428 | static struct inet_protosw mptcp_protosw = { |
3429 | .type = SOCK_STREAM, | |
3430 | .protocol = IPPROTO_MPTCP, | |
3431 | .prot = &mptcp_prot, | |
2303f994 PK |
3432 | .ops = &mptcp_stream_ops, |
3433 | .flags = INET_PROTOSW_ICSK, | |
f870fa0b MM |
3434 | }; |
3435 | ||
d39dceca | 3436 | void __init mptcp_proto_init(void) |
f870fa0b | 3437 | { |
2303f994 | 3438 | mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; |
2303f994 | 3439 | |
d027236c PA |
3440 | if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) |
3441 | panic("Failed to allocate MPTCP pcpu counter\n"); | |
3442 | ||
2303f994 | 3443 | mptcp_subflow_init(); |
1b1c7a0e | 3444 | mptcp_pm_init(); |
2c5ebd00 | 3445 | mptcp_token_init(); |
2303f994 | 3446 | |
f870fa0b MM |
3447 | if (proto_register(&mptcp_prot, 1) != 0) |
3448 | panic("Failed to register MPTCP proto.\n"); | |
3449 | ||
3450 | inet_register_protosw(&mptcp_protosw); | |
6771bfd9 FW |
3451 | |
3452 | BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); | |
f870fa0b MM |
3453 | } |
3454 | ||
3455 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
e4fae864 FW |
3456 | static int mptcp6_release(struct socket *sock) |
3457 | { | |
3458 | struct mptcp_subflow_context *subflow; | |
3459 | struct mptcp_sock *msk; | |
3460 | struct sock *sk = sock->sk; | |
3461 | ||
3462 | if (!sk) | |
3463 | return 0; | |
3464 | ||
3465 | lock_sock(sk); | |
3466 | ||
3467 | msk = mptcp_sk(sk); | |
3468 | ||
3469 | mptcp_for_each_subflow(msk, subflow) { | |
3470 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
3471 | ||
3472 | ip_mc_drop_socket(ssk); | |
3473 | ipv6_sock_mc_close(ssk); | |
3474 | ipv6_sock_ac_close(ssk); | |
3475 | } | |
3476 | ||
3477 | release_sock(sk); | |
3478 | return inet6_release(sock); | |
3479 | } | |
3480 | ||
e42f1ac6 FW |
3481 | static const struct proto_ops mptcp_v6_stream_ops = { |
3482 | .family = PF_INET6, | |
3483 | .owner = THIS_MODULE, | |
e4fae864 | 3484 | .release = mptcp6_release, |
e42f1ac6 FW |
3485 | .bind = mptcp_bind, |
3486 | .connect = mptcp_stream_connect, | |
3487 | .socketpair = sock_no_socketpair, | |
3488 | .accept = mptcp_stream_accept, | |
d2f77c53 | 3489 | .getname = inet6_getname, |
e42f1ac6 FW |
3490 | .poll = mptcp_poll, |
3491 | .ioctl = inet6_ioctl, | |
3492 | .gettstamp = sock_gettstamp, | |
3493 | .listen = mptcp_listen, | |
76e2a55d | 3494 | .shutdown = inet_shutdown, |
e42f1ac6 FW |
3495 | .setsockopt = sock_common_setsockopt, |
3496 | .getsockopt = sock_common_getsockopt, | |
3497 | .sendmsg = inet6_sendmsg, | |
3498 | .recvmsg = inet6_recvmsg, | |
3499 | .mmap = sock_no_mmap, | |
3500 | .sendpage = inet_sendpage, | |
3501 | #ifdef CONFIG_COMPAT | |
3986912f | 3502 | .compat_ioctl = inet6_compat_ioctl, |
e42f1ac6 FW |
3503 | #endif |
3504 | }; | |
3505 | ||
f870fa0b MM |
3506 | static struct proto mptcp_v6_prot; |
3507 | ||
79c0949e PK |
3508 | static void mptcp_v6_destroy(struct sock *sk) |
3509 | { | |
3510 | mptcp_destroy(sk); | |
3511 | inet6_destroy_sock(sk); | |
3512 | } | |
3513 | ||
f870fa0b MM |
3514 | static struct inet_protosw mptcp_v6_protosw = { |
3515 | .type = SOCK_STREAM, | |
3516 | .protocol = IPPROTO_MPTCP, | |
3517 | .prot = &mptcp_v6_prot, | |
2303f994 | 3518 | .ops = &mptcp_v6_stream_ops, |
f870fa0b MM |
3519 | .flags = INET_PROTOSW_ICSK, |
3520 | }; | |
3521 | ||
d39dceca | 3522 | int __init mptcp_proto_v6_init(void) |
f870fa0b MM |
3523 | { |
3524 | int err; | |
3525 | ||
3526 | mptcp_v6_prot = mptcp_prot; | |
3527 | strcpy(mptcp_v6_prot.name, "MPTCPv6"); | |
3528 | mptcp_v6_prot.slab = NULL; | |
79c0949e | 3529 | mptcp_v6_prot.destroy = mptcp_v6_destroy; |
b0519de8 | 3530 | mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); |
f870fa0b MM |
3531 | |
3532 | err = proto_register(&mptcp_v6_prot, 1); | |
3533 | if (err) | |
3534 | return err; | |
3535 | ||
3536 | err = inet6_register_protosw(&mptcp_v6_protosw); | |
3537 | if (err) | |
3538 | proto_unregister(&mptcp_v6_prot); | |
3539 | ||
3540 | return err; | |
3541 | } | |
3542 | #endif |