// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

static struct workqueue_struct *pm_wq;

/* path manager command handlers */

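/* Record the address to be advertised on this connection; addr_signal is
 * later consumed and cleared by mptcp_pm_addr_signal() below.
 */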
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}

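/* address and subflow removal are not implemented yet: both handlers below
 * simply report -ENOTSUPP to the caller
 */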
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

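/* Lockless fast path: accept_subflow is cleared once the subflow limit is
 * reached, so most callers can bail out without touching the PM lock; the
 * counter itself is only checked and updated under pm->lock.
 */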
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	int ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
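	/* queue_work() returns true only if the work was not already pending:
	 * in that case hold a reference on the msk socket, dropped by
	 * pm_worker() via sock_put() once the work has run
	 */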
	if (queue_work(pm_wq, &msk->pm.work))
		sock_hold((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

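	/* try to avoid acquiring the lock below */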
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

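/* An ADD_ADDR option has been received: if there is still room for more
 * addresses, stash it in pm->remote and let pm_worker() handle the
 * MPTCP_PM_ADD_ADDR_RECEIVED event.
 */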
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* re-check under the PM lock that there is still something to signal */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

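/* Fill @saddr with the address to be advertised, if any: re-check addr_signal
 * under the PM lock and verify that the pending address fits in the
 * @remaining TCP option space before clearing the flag.
 */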
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			  struct mptcp_addr_info *saddr)
{
	int ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_signal(msk))
		goto out_unlock;

	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

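/* Workqueue callback: under the msk socket lock and the PM spinlock, clear
 * each pending status bit and dispatch it to the in-kernel (netlink) path
 * manager, then release the socket reference taken by
 * mptcp_pm_schedule_work().
 */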
static void pm_worker(struct work_struct *work)
{
	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
						work);
	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
	release_sock(sk);
	sock_put(sk);
}

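/* Per-connection PM state initialization; netlink PM specific setup is
 * delegated to mptcp_pm_nl_data_init().
 */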
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_WORK(&msk->pm.work, pm_worker);

	mptcp_pm_nl_data_init(msk);
}

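/* Connection teardown: cancel any pending PM work and, if it was queued,
 * drop the socket reference taken by mptcp_pm_schedule_work().
 */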
void mptcp_pm_close(struct mptcp_sock *msk)
{
	if (cancel_work_sync(&msk->pm.work))
		sock_put((struct sock *)msk);
}

void mptcp_pm_init(void)
{
	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
	if (!pm_wq)
		panic("Failed to allocate workqueue");

	mptcp_pm_nl_init();
}