// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */
#include "mt76.h"

static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	/* Currently voice traffic (AC_VO) always runs without aggregation,
	 * no special handling is needed. AC_BE/AC_BK use tids 0-3. Just check
	 * for non AC_BK/AC_BE and set smaller timeout for it. */
	return HZ / (tidno >= 4 ? 25 : 10);
}
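
/*
 * Advance the reorder window head by one slot and, if that slot holds a
 * buffered frame, move it to the output queue. Called with tid->lock held.
 */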
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}
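
/*
 * Release all buffered frames with a sequence number below @head, advancing
 * the window head one slot at a time. Called with tid->lock held.
 */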
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}
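
/*
 * Release the run of consecutively buffered frames at the window head,
 * stopping at the first empty slot. Called with tid->lock held.
 */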
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}
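
/*
 * Timeout-driven release: scan the reorder buffer and, for every buffered
 * frame whose reorder timeout has expired, force-release all frames up to
 * its sequence number. Runs from the reorder work with tid->lock held.
 */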
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after32(jiffies,
				  status->reorder_time +
				  mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}
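
/*
 * Delayed-work handler: flush frames whose reorder timeout has expired and
 * re-arm the work while frames remain buffered. Released frames are handed
 * to mt76_rx_complete() outside of tid->lock.
 */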
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}
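
/*
 * Handle a BlockAck request (BAR) control frame: move the reorder window
 * head to the BAR starting sequence number and release everything below it.
 */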
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->qos_ctl = tidno = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
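
/*
 * Main RX reorder entry point. The frame is queued to @frames first; if it
 * belongs to a BA session it may be unlinked again and parked in the reorder
 * buffer until the sequence-number gap in front of it closes or the reorder
 * timeout fires. Example with head = 10: seqno 10 is released immediately,
 * seqno 12 is buffered, and seqno 9 (behind the window head) is dropped.
 */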
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	u8 ackp;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr && !(status->flag & RX_FLAG_8023)) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		/* frame sequence number is behind the window head: drop */
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		/* in-order frame at the window head: pass it through */
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}
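
/*
 * Set up RX reordering for a TID, typically from the driver's ampdu_action
 * handler on an ADDBA request. Any existing session for this TID is torn
 * down first; the reorder window starts at @ssn with @size slots.
 */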
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	tid->num = tidno;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
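
/*
 * Tear down a reorder session: mark it stopped under tid->lock, drop all
 * buffered frames, then cancel the reorder work once the lock is released.
 */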
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u16 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->reorder_buf[i] = NULL;
		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work_sync(&tid->reorder_work);
}
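
/*
 * Detach the TID's reorder context from the wcid and free it after an RCU
 * grace period. dev->mutex must be held, as checked by the lockdep
 * annotation on rcu_replace_pointer().
 */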
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);