]>
Commit | Line | Data |
---|---|---|
1b1c7a0e PK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2019, Intel Corporation. | |
5 | */ | |
c85adced GT |
6 | #define pr_fmt(fmt) "MPTCP: " fmt |
7 | ||
1b1c7a0e PK |
8 | #include <linux/kernel.h> |
9 | #include <net/tcp.h> | |
10 | #include <net/mptcp.h> | |
11 | #include "protocol.h" | |
12 | ||
1b1c7a0e PK |
13 | /* path manager command handlers */ |
14 | ||
/* Record @addr as the local address to announce on this connection and
 * flag that an ADD_ADDR option must be emitted on a following packet.
 * @echo selects whether the option carries the echo bit (confirming a
 * peer announce) rather than a fresh signal.
 *
 * NOTE(review): pm.local and the add_addr_* flags are written without
 * pm->lock held here -- presumably callers serialize (the receive path
 * calls this under the lock); confirm for other call sites.
 */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	/* add_addr_signal is raised last, after the address payload and
	 * echo flag are in place, so lockless readers that observe the
	 * signal see consistent data.
	 */
	WRITE_ONCE(msk->pm.add_addr_echo, echo);
	WRITE_ONCE(msk->pm.add_addr_signal, true);
	return 0;
}
26 | ||
/* Withdraw a previously announced local address. Not implemented yet.
 * NOTE(review): -ENOTSUPP is kernel-internal (checkpatch warns about
 * it); switch to -EOPNOTSUPP if this value can ever reach userspace.
 */
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}
31 | ||
/* Tear down the subflow associated with @remote_id. Not implemented
 * yet; same -ENOTSUPP caveat as mptcp_pm_remove_addr() above applies.
 */
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}
36 | ||
37 | /* path manager event handlers */ | |
38 | ||
39 | void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side) | |
40 | { | |
41 | struct mptcp_pm_data *pm = &msk->pm; | |
42 | ||
43 | pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side); | |
44 | ||
45 | WRITE_ONCE(pm->server_side, server_side); | |
46 | } | |
47 | ||
/* Decide whether an incoming request to create a new subflow may be
 * accepted.  On success one slot is consumed; once the count reaches
 * pm->subflows_max, accept_subflow is cleared so later callers can
 * refuse without taking the lock.
 */
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	int ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	/* this caller took the last free slot: stop accepting more */
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}
68 | ||
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	/* event already pending: the queued worker will handle it */
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	/* take a socket reference for the worker; schedule_work()
	 * returns false when the work was already queued, in which case
	 * a reference is presumably already in flight -- the worker is
	 * expected to drop it (confirm against mptcp_worker).
	 */
	if (schedule_work(&msk->work))
		sock_hold((struct sock *)msk);
	return true;
}
85 | ||
/* Connection-level event: the MPTCP connection is fully established
 * (peer confirmed the keys).  Kick the worker so the path manager can
 * start announcing addresses / creating subflows, if any work is
 * configured.
 */
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* double check under the lock before scheduling the worker */
	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
103 | ||
/* Connection-level event: the MPTCP connection is closing.
 * Trace-only for now; no path manager state to release here yet.
 */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
108 | ||
/* Subflow-level event: @subflow reached the established state.
 * Schedule the worker if the path manager still has local work
 * pending (e.g. more addresses to announce or subflows to open).
 */
void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* lockless fast path: nothing pending, nothing to do */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* re-check under the lock before scheduling the worker */
	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
126 | ||
/* Subflow-level event: the subflow with local address @id closed.
 * Trace-only for now; slot accounting is not rolled back here yet.
 */
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}
131 | ||
/* Handle an ADD_ADDR option received from the peer.  When we are not
 * accepting new addresses, just echo the announce back; otherwise
 * stash the remote address and let the worker act on it.  If the
 * ADD_ADDR_RECEIVED event is already pending, pm->remote is left
 * untouched so the in-flight address is not overwritten.
 */
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr))
		mptcp_pm_announce_addr(msk, addr, true);
	else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}
149 | ||
d0876b22 GT |
/* Handle an RM_ADDR option received from the peer: record the address
 * id being withdrawn and schedule the worker to act on it.
 */
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d", msk, rm_id);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_id = rm_id;
	spin_unlock_bh(&pm->lock);
}
161 | ||
1b1c7a0e PK |
162 | /* path manager helpers */ |
163 | ||
f643b803 | 164 | bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining, |
6a6c05a8 | 165 | struct mptcp_addr_info *saddr, bool *echo) |
1b1c7a0e | 166 | { |
926bdeab PK |
167 | int ret = false; |
168 | ||
169 | spin_lock_bh(&msk->pm.lock); | |
170 | ||
171 | /* double check after the lock is acquired */ | |
f643b803 | 172 | if (!mptcp_pm_should_add_signal(msk)) |
926bdeab PK |
173 | goto out_unlock; |
174 | ||
175 | if (remaining < mptcp_add_addr_len(msk->pm.local.family)) | |
176 | goto out_unlock; | |
177 | ||
178 | *saddr = msk->pm.local; | |
6a6c05a8 | 179 | *echo = READ_ONCE(msk->pm.add_addr_echo); |
f643b803 | 180 | WRITE_ONCE(msk->pm.add_addr_signal, false); |
926bdeab PK |
181 | ret = true; |
182 | ||
183 | out_unlock: | |
184 | spin_unlock_bh(&msk->pm.lock); | |
185 | return ret; | |
1b1c7a0e PK |
186 | } |
187 | ||
5cb104ae GT |
188 | bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, |
189 | u8 *rm_id) | |
190 | { | |
191 | int ret = false; | |
192 | ||
193 | spin_lock_bh(&msk->pm.lock); | |
194 | ||
195 | /* double check after the lock is acquired */ | |
196 | if (!mptcp_pm_should_rm_signal(msk)) | |
197 | goto out_unlock; | |
198 | ||
199 | if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE) | |
200 | goto out_unlock; | |
201 | ||
202 | *rm_id = msk->pm.rm_id; | |
203 | WRITE_ONCE(msk->pm.rm_addr_signal, false); | |
204 | ret = true; | |
205 | ||
206 | out_unlock: | |
207 | spin_unlock_bh(&msk->pm.lock); | |
208 | return ret; | |
209 | } | |
210 | ||
1b1c7a0e PK |
/* Map @skc (an incoming subflow's socket) to a local address id;
 * fully delegated to the netlink path manager.
 */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
215 | ||
1b1c7a0e PK |
/* Reset all per-connection path manager state for a freshly created
 * msk, then let the netlink PM apply its per-namespace configuration.
 */
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	/* these flags are read locklessly elsewhere, hence WRITE_ONCE */
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.add_addr_signal, false);
	WRITE_ONCE(msk->pm.rm_addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	WRITE_ONCE(msk->pm.add_addr_echo, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);

	mptcp_pm_nl_data_init(msk);
}
235 | ||
/* Boot-time initialization of the path manager subsystem; currently
 * only the netlink PM needs setup.
 */
void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}