]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/mptcp/pm.c
mptcp: send ack for every add_addr
[mirror_ubuntu-jammy-kernel.git] / net / mptcp / pm.c
CommitLineData
1b1c7a0e
PK
1// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2019, Intel Corporation.
5 */
c85adced
GT
6#define pr_fmt(fmt) "MPTCP: " fmt
7
1b1c7a0e
PK
8#include <linux/kernel.h>
9#include <net/tcp.h>
10#include <net/mptcp.h>
11#include "protocol.h"
12
1b1c7a0e
PK
13/* path manager command handlers */
14
15int mptcp_pm_announce_addr(struct mptcp_sock *msk,
6a6c05a8 16 const struct mptcp_addr_info *addr,
0f5c9e3f 17 bool echo, bool port)
1b1c7a0e 18{
13ad9f01 19 u8 add_addr = READ_ONCE(msk->pm.addr_signal);
d91d322a 20
926bdeab
PK
21 pr_debug("msk=%p, local_id=%d", msk, addr->id);
22
42842a42
GT
23 if (add_addr) {
24 pr_warn("addr_signal error, add_addr=%d", add_addr);
25 return -EINVAL;
26 }
27
926bdeab 28 msk->pm.local = *addr;
d91d322a
GT
29 add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
30 if (echo)
31 add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
84dfe367
GT
32 if (addr->family == AF_INET6)
33 add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
0f5c9e3f
GT
34 if (port)
35 add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
13ad9f01 36 WRITE_ONCE(msk->pm.addr_signal, add_addr);
926bdeab 37 return 0;
1b1c7a0e
PK
38}
39
40int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
41{
13ad9f01 42 u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
42842a42 43
b6c08380
GT
44 pr_debug("msk=%p, local_id=%d", msk, local_id);
45
42842a42
GT
46 if (rm_addr) {
47 pr_warn("addr_signal error, rm_addr=%d", rm_addr);
48 return -EINVAL;
49 }
50
b6c08380 51 msk->pm.rm_id = local_id;
42842a42 52 rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
13ad9f01 53 WRITE_ONCE(msk->pm.addr_signal, rm_addr);
b6c08380 54 return 0;
1b1c7a0e
PK
55}
56
0ee4261a 57int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
1b1c7a0e 58{
0ee4261a
GT
59 pr_debug("msk=%p, local_id=%d", msk, local_id);
60
61 spin_lock_bh(&msk->pm.lock);
62 mptcp_pm_nl_rm_subflow_received(msk, local_id);
63 spin_unlock_bh(&msk->pm.lock);
64 return 0;
1b1c7a0e
PK
65}
66
67/* path manager event handlers */
68
69void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
70{
71 struct mptcp_pm_data *pm = &msk->pm;
72
73 pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
74
75 WRITE_ONCE(pm->server_side, server_side);
76}
77
78bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
79{
926bdeab 80 struct mptcp_pm_data *pm = &msk->pm;
a914e586 81 unsigned int subflows_max;
f58f065a 82 int ret = 0;
926bdeab 83
a914e586
GT
84 subflows_max = mptcp_pm_get_subflows_max(msk);
85
926bdeab 86 pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
a914e586 87 subflows_max, READ_ONCE(pm->accept_subflow));
926bdeab
PK
88
89 /* try to avoid acquiring the lock below */
90 if (!READ_ONCE(pm->accept_subflow))
91 return false;
92
93 spin_lock_bh(&pm->lock);
f58f065a 94 if (READ_ONCE(pm->accept_subflow)) {
a914e586
GT
95 ret = pm->subflows < subflows_max;
96 if (ret && ++pm->subflows == subflows_max)
f58f065a
GT
97 WRITE_ONCE(pm->accept_subflow, false);
98 }
926bdeab
PK
99 spin_unlock_bh(&pm->lock);
100
101 return ret;
102}
103
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	/* Bit already set: the event is already being (or will be) served. */
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	/* Kick the msk worker to process the newly pending event. */
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
119
/* Serve the (fully) ESTABLISHED event for @msk at most once per connection,
 * scheduling the PM worker when needed.
 */
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established()
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
	/* Mark the event as served unconditionally, so later racing callers
	 * skip it entirely.
	 */
	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
143
/* Connection-closed event hook: currently no PM action is taken, only a
 * debug trace is emitted.
 */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
148
149void mptcp_pm_subflow_established(struct mptcp_sock *msk,
150 struct mptcp_subflow_context *subflow)
151{
926bdeab
PK
152 struct mptcp_pm_data *pm = &msk->pm;
153
1b1c7a0e 154 pr_debug("msk=%p", msk);
926bdeab
PK
155
156 if (!READ_ONCE(pm->work_pending))
157 return;
158
159 spin_lock_bh(&pm->lock);
160
161 if (READ_ONCE(pm->work_pending))
162 mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
163
164 spin_unlock_bh(&pm->lock);
1b1c7a0e
PK
165}
166
/* Subflow-closed event hook: currently no PM action is taken, only a debug
 * trace is emitted.
 */
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}
171
/* Handle an incoming ADD_ADDR option on @msk.
 *
 * When the local PM is not accepting new addresses, the address is bounced
 * straight back as an echo and an ack is requested; otherwise the remote
 * address is stashed and the event is handed to the PM worker.
 */
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		/* Not accepting: echo the address back to the peer. */
		mptcp_pm_announce_addr(msk, addr, true, addr->port);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		/* Event newly scheduled: record the address for the worker. */
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}
191
192void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
193{
b5a7acd3 194 if (!mptcp_pm_should_add_signal(msk))
84dfe367
GT
195 return;
196
197 mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
1b1c7a0e
PK
198}
199
d0876b22
GT
200void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
201{
202 struct mptcp_pm_data *pm = &msk->pm;
203
204 pr_debug("msk=%p remote_id=%d", msk, rm_id);
205
206 spin_lock_bh(&pm->lock);
207 mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
208 pm->rm_id = rm_id;
209 spin_unlock_bh(&pm->lock);
210}
211
40453a5c
GT
/* Handle an incoming MP_PRIO option: record the new backup priority on the
 * receiving subflow.
 */
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;
}
219
1b1c7a0e
PK
220/* path manager helpers */
221
f643b803 222bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
4a2777a8 223 struct mptcp_addr_info *saddr, bool *echo, bool *port)
1b1c7a0e 224{
926bdeab
PK
225 int ret = false;
226
227 spin_lock_bh(&msk->pm.lock);
228
229 /* double check after the lock is acquired */
f643b803 230 if (!mptcp_pm_should_add_signal(msk))
926bdeab
PK
231 goto out_unlock;
232
d91d322a 233 *echo = mptcp_pm_should_add_signal_echo(msk);
4a2777a8 234 *port = mptcp_pm_should_add_signal_port(msk);
456afe01 235
4a2777a8 236 if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
926bdeab
PK
237 goto out_unlock;
238
239 *saddr = msk->pm.local;
13ad9f01 240 WRITE_ONCE(msk->pm.addr_signal, 0);
926bdeab
PK
241 ret = true;
242
243out_unlock:
244 spin_unlock_bh(&msk->pm.lock);
245 return ret;
1b1c7a0e
PK
246}
247
5cb104ae
GT
248bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
249 u8 *rm_id)
250{
251 int ret = false;
252
253 spin_lock_bh(&msk->pm.lock);
254
255 /* double check after the lock is acquired */
256 if (!mptcp_pm_should_rm_signal(msk))
257 goto out_unlock;
258
259 if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
260 goto out_unlock;
261
262 *rm_id = msk->pm.rm_id;
13ad9f01 263 WRITE_ONCE(msk->pm.addr_signal, 0);
5cb104ae
GT
264 ret = true;
265
266out_unlock:
267 spin_unlock_bh(&msk->pm.lock);
268 return ret;
269}
270
1b1c7a0e
PK
/* Map @skc (an incoming subflow endpoint) to a local address id; delegates
 * to the netlink path-manager.
 */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
275
1b1c7a0e
PK
276void mptcp_pm_data_init(struct mptcp_sock *msk)
277{
278 msk->pm.add_addr_signaled = 0;
279 msk->pm.add_addr_accepted = 0;
280 msk->pm.local_addr_used = 0;
281 msk->pm.subflows = 0;
5cb104ae 282 msk->pm.rm_id = 0;
1b1c7a0e 283 WRITE_ONCE(msk->pm.work_pending, false);
13ad9f01 284 WRITE_ONCE(msk->pm.addr_signal, 0);
1b1c7a0e
PK
285 WRITE_ONCE(msk->pm.accept_addr, false);
286 WRITE_ONCE(msk->pm.accept_subflow, false);
287 msk->pm.status = 0;
288
289 spin_lock_init(&msk->pm.lock);
b6c08380 290 INIT_LIST_HEAD(&msk->pm.anno_list);
01cacb00
PA
291
292 mptcp_pm_nl_data_init(msk);
1b1c7a0e
PK
293}
294
d39dceca 295void __init mptcp_pm_init(void)
1b1c7a0e 296{
01cacb00 297 mptcp_pm_nl_init();
1b1c7a0e 298}