// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */
#include "mt76.h"

#define REORDER_TIMEOUT (HZ / 10)
9static void
10mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
11{
12 struct sk_buff *skb;
13
14 tid->head = ieee80211_sn_inc(tid->head);
15
16 skb = tid->reorder_buf[idx];
17 if (!skb)
18 return;
19
20 tid->reorder_buf[idx] = NULL;
21 tid->nframes--;
22 __skb_queue_tail(frames, skb);
23}
24
25static void
13381dcd
RL
26mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
27 struct sk_buff_head *frames,
28 u16 head)
aee5b8cf
FF
29{
30 int idx;
31
32 while (ieee80211_sn_less(tid->head, head)) {
33 idx = tid->head % tid->size;
34 mt76_aggr_release(tid, frames, idx);
35 }
36}
37
38static void
39mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
40{
41 int idx = tid->head % tid->size;
42
43 while (tid->reorder_buf[idx]) {
44 mt76_aggr_release(tid, frames, idx);
45 idx = tid->head % tid->size;
46 }
47}
48
49static void
50mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
51{
52 struct mt76_rx_status *status;
53 struct sk_buff *skb;
54 int start, idx, nframes;
55
56 if (!tid->nframes)
57 return;
58
59 mt76_rx_aggr_release_head(tid, frames);
60
61 start = tid->head % tid->size;
62 nframes = tid->nframes;
63
64 for (idx = (tid->head + 1) % tid->size;
65 idx != start && nframes;
66 idx = (idx + 1) % tid->size) {
aee5b8cf
FF
67 skb = tid->reorder_buf[idx];
68 if (!skb)
69 continue;
70
71 nframes--;
13381dcd
RL
72 status = (struct mt76_rx_status *)skb->cb;
73 if (!time_after(jiffies,
74 status->reorder_time + REORDER_TIMEOUT))
aee5b8cf
FF
75 continue;
76
77 mt76_rx_aggr_release_frames(tid, frames, status->seqno);
78 }
79
80 mt76_rx_aggr_release_head(tid, frames);
81}
82
83static void
84mt76_rx_aggr_reorder_work(struct work_struct *work)
85{
86 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
87 reorder_work.work);
88 struct mt76_dev *dev = tid->dev;
aee5b8cf 89 struct sk_buff_head frames;
fb208dc7 90 int nframes;
aee5b8cf
FF
91
92 __skb_queue_head_init(&frames);
93
94 local_bh_disable();
9febfa67 95 rcu_read_lock();
aee5b8cf
FF
96
97 spin_lock(&tid->lock);
98 mt76_rx_aggr_check_release(tid, &frames);
fb208dc7 99 nframes = tid->nframes;
aee5b8cf
FF
100 spin_unlock(&tid->lock);
101
fb208dc7
FF
102 if (nframes)
103 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
104 REORDER_TIMEOUT);
81e850ef 105 mt76_rx_complete(dev, &frames, NULL);
aee5b8cf 106
9febfa67 107 rcu_read_unlock();
aee5b8cf
FF
108 local_bh_enable();
109}
110
17cf68b7
FF
111static void
112mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
113{
13381dcd
RL
114 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
115 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
17cf68b7
FF
116 struct mt76_wcid *wcid = status->wcid;
117 struct mt76_rx_tid *tid;
118 u16 seqno;
119
120 if (!ieee80211_is_ctl(bar->frame_control))
121 return;
122
123 if (!ieee80211_is_back_req(bar->frame_control))
124 return;
125
126 status->tid = le16_to_cpu(bar->control) >> 12;
b183878a 127 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
17cf68b7
FF
128 tid = rcu_dereference(wcid->aggr[status->tid]);
129 if (!tid)
130 return;
131
132 spin_lock_bh(&tid->lock);
e7aaa72f
FF
133 if (!tid->stopped) {
134 mt76_rx_aggr_release_frames(tid, frames, seqno);
135 mt76_rx_aggr_release_head(tid, frames);
136 }
17cf68b7
FF
137 spin_unlock_bh(&tid->lock);
138}
139
aee5b8cf
FF
140void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
141{
13381dcd
RL
142 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
143 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
aee5b8cf
FF
144 struct mt76_wcid *wcid = status->wcid;
145 struct ieee80211_sta *sta;
146 struct mt76_rx_tid *tid;
147 bool sn_less;
148 u16 seqno, head, size;
1af83148 149 u8 ackp, idx;
aee5b8cf
FF
150
151 __skb_queue_tail(frames, skb);
152
153 sta = wcid_to_sta(wcid);
17cf68b7 154 if (!sta)
aee5b8cf
FF
155 return;
156
17cf68b7
FF
157 if (!status->aggr) {
158 mt76_rx_aggr_check_ctl(skb, frames);
159 return;
160 }
161
1af83148
FF
162 /* not part of a BA session */
163 ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
164 if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
165 ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
166 return;
167
aee5b8cf
FF
168 tid = rcu_dereference(wcid->aggr[status->tid]);
169 if (!tid)
170 return;
171
18efed59 172 status->flag |= RX_FLAG_DUP_VALIDATED;
aee5b8cf
FF
173 spin_lock_bh(&tid->lock);
174
175 if (tid->stopped)
176 goto out;
177
178 head = tid->head;
179 seqno = status->seqno;
180 size = tid->size;
181 sn_less = ieee80211_sn_less(seqno, head);
182
183 if (!tid->started) {
184 if (sn_less)
185 goto out;
186
187 tid->started = true;
188 }
189
190 if (sn_less) {
191 __skb_unlink(skb, frames);
192 dev_kfree_skb(skb);
193 goto out;
194 }
195
196 if (seqno == head) {
197 tid->head = ieee80211_sn_inc(head);
198 if (tid->nframes)
199 mt76_rx_aggr_release_head(tid, frames);
200 goto out;
201 }
202
203 __skb_unlink(skb, frames);
204
205 /*
206 * Frame sequence number exceeds buffering window, free up some space
207 * by releasing previous frames
208 */
209 if (!ieee80211_sn_less(seqno, head + size)) {
210 head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
211 mt76_rx_aggr_release_frames(tid, frames, head);
212 }
213
214 idx = seqno % size;
215
216 /* Discard if the current slot is already in use */
217 if (tid->reorder_buf[idx]) {
218 dev_kfree_skb(skb);
219 goto out;
220 }
221
222 status->reorder_time = jiffies;
223 tid->reorder_buf[idx] = skb;
224 tid->nframes++;
225 mt76_rx_aggr_release_head(tid, frames);
226
13381dcd
RL
227 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
228 REORDER_TIMEOUT);
aee5b8cf
FF
229
230out:
231 spin_unlock_bh(&tid->lock);
232}
233
234int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
235 u16 ssn, u8 size)
236{
237 struct mt76_rx_tid *tid;
238
239 mt76_rx_aggr_stop(dev, wcid, tidno);
240
acafe7e3 241 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
aee5b8cf
FF
242 if (!tid)
243 return -ENOMEM;
244
245 tid->dev = dev;
246 tid->head = ssn;
247 tid->size = size;
248 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
249 spin_lock_init(&tid->lock);
250
251 rcu_assign_pointer(wcid->aggr[tidno], tid);
252
253 return 0;
254}
255EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
256
257static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
258{
259 u8 size = tid->size;
260 int i;
261
262 spin_lock_bh(&tid->lock);
263
264 tid->stopped = true;
265 for (i = 0; tid->nframes && i < size; i++) {
266 struct sk_buff *skb = tid->reorder_buf[i];
267
268 if (!skb)
269 continue;
270
271 tid->nframes--;
272 dev_kfree_skb(skb);
273 }
274
275 spin_unlock_bh(&tid->lock);
e7aaa72f
FF
276
277 cancel_delayed_work_sync(&tid->reorder_work);
aee5b8cf
FF
278}
279
/* Stop RX aggregation on one TID: atomically unpublish the session
 * pointer (readers are excluded via RCU), then shut it down and free it
 * after a grace period. Caller must hold dev->mutex.
 */
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	/* Swap NULL into wcid->aggr[tidno]; the old value lands in tid. */
	rcu_swap_protected(wcid->aggr[tidno], tid,
			   lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		/* Defer the free until concurrent RCU readers are done. */
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);