1 /* SPDX-License-Identifier: ISC */
3 #include <linux/etherdevice.h>
4 #include <linux/timekeeping.h>
8 #define MT_PSE_PAGE_SIZE 128
11 mt7603_ac_queue_mask0(u32 mask
)
15 ret
|= GENMASK(3, 0) * !!(mask
& BIT(0));
16 ret
|= GENMASK(8, 5) * !!(mask
& BIT(1));
17 ret
|= GENMASK(13, 10) * !!(mask
& BIT(2));
18 ret
|= GENMASK(19, 16) * !!(mask
& BIT(3));
23 mt76_stop_tx_ac(struct mt7603_dev
*dev
, u32 mask
)
25 mt76_set(dev
, MT_WF_ARB_TX_STOP_0
, mt7603_ac_queue_mask0(mask
));
29 mt76_start_tx_ac(struct mt7603_dev
*dev
, u32 mask
)
31 mt76_set(dev
, MT_WF_ARB_TX_START_0
, mt7603_ac_queue_mask0(mask
));
34 void mt7603_mac_set_timing(struct mt7603_dev
*dev
)
36 u32 cck
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, 231) |
37 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, 48);
38 u32 ofdm
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, 60) |
39 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, 24);
40 int offset
= 3 * dev
->coverage_class
;
41 u32 reg_offset
= FIELD_PREP(MT_TIMEOUT_VAL_PLCP
, offset
) |
42 FIELD_PREP(MT_TIMEOUT_VAL_CCA
, offset
);
46 if (dev
->mt76
.chandef
.chan
->band
== NL80211_BAND_5GHZ
)
51 mt76_set(dev
, MT_ARB_SCR
,
52 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
55 mt76_wr(dev
, MT_TIMEOUT_CCK
, cck
+ reg_offset
);
56 mt76_wr(dev
, MT_TIMEOUT_OFDM
, ofdm
+ reg_offset
);
58 FIELD_PREP(MT_IFS_EIFS
, 360) |
59 FIELD_PREP(MT_IFS_RIFS
, 2) |
60 FIELD_PREP(MT_IFS_SIFS
, sifs
) |
61 FIELD_PREP(MT_IFS_SLOT
, dev
->slottime
));
63 if (dev
->slottime
< 20)
64 val
= MT7603_CFEND_RATE_DEFAULT
;
66 val
= MT7603_CFEND_RATE_11B
;
68 mt76_rmw_field(dev
, MT_AGG_CONTROL
, MT_AGG_CONTROL_CFEND_RATE
, val
);
70 mt76_clear(dev
, MT_ARB_SCR
,
71 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
75 mt7603_wtbl_update(struct mt7603_dev
*dev
, int idx
, u32 mask
)
77 mt76_rmw(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_WLAN_IDX
,
78 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX
, idx
) | mask
);
80 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
84 mt7603_wtbl1_addr(int idx
)
86 return MT_WTBL1_BASE
+ idx
* MT_WTBL1_SIZE
;
90 mt7603_wtbl2_addr(int idx
)
93 return MT_PCIE_REMAP_BASE_1
+ idx
* MT_WTBL2_SIZE
;
97 mt7603_wtbl3_addr(int idx
)
99 u32 base
= mt7603_wtbl2_addr(MT7603_WTBL_SIZE
);
101 return base
+ idx
* MT_WTBL3_SIZE
;
105 mt7603_wtbl4_addr(int idx
)
107 u32 base
= mt7603_wtbl3_addr(MT7603_WTBL_SIZE
);
109 return base
+ idx
* MT_WTBL4_SIZE
;
112 void mt7603_wtbl_init(struct mt7603_dev
*dev
, int idx
, int vif
,
115 const void *_mac
= mac_addr
;
116 u32 addr
= mt7603_wtbl1_addr(idx
);
121 w0
= FIELD_PREP(MT_WTBL1_W0_ADDR_HI
,
122 get_unaligned_le16(_mac
+ 4));
123 w1
= FIELD_PREP(MT_WTBL1_W1_ADDR_LO
,
124 get_unaligned_le32(_mac
));
130 w0
|= MT_WTBL1_W0_RX_CHECK_A1
;
131 w0
|= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX
, vif
);
133 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
135 mt76_set(dev
, addr
+ 0 * 4, w0
);
136 mt76_set(dev
, addr
+ 1 * 4, w1
);
137 mt76_set(dev
, addr
+ 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL
);
139 mt76_stop_tx_ac(dev
, GENMASK(3, 0));
140 addr
= mt7603_wtbl2_addr(idx
);
141 for (i
= 0; i
< MT_WTBL2_SIZE
; i
+= 4)
142 mt76_wr(dev
, addr
+ i
, 0);
143 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_WTBL2
);
144 mt76_start_tx_ac(dev
, GENMASK(3, 0));
146 addr
= mt7603_wtbl3_addr(idx
);
147 for (i
= 0; i
< MT_WTBL3_SIZE
; i
+= 4)
148 mt76_wr(dev
, addr
+ i
, 0);
150 addr
= mt7603_wtbl4_addr(idx
);
151 for (i
= 0; i
< MT_WTBL4_SIZE
; i
+= 4)
152 mt76_wr(dev
, addr
+ i
, 0);
156 mt7603_wtbl_set_skip_tx(struct mt7603_dev
*dev
, int idx
, bool enabled
)
158 u32 addr
= mt7603_wtbl1_addr(idx
);
159 u32 val
= mt76_rr(dev
, addr
+ 3 * 4);
161 val
&= ~MT_WTBL1_W3_SKIP_TX
;
162 val
|= enabled
* MT_WTBL1_W3_SKIP_TX
;
164 mt76_wr(dev
, addr
+ 3 * 4, val
);
167 void mt7603_filter_tx(struct mt7603_dev
*dev
, int idx
, bool abort
)
173 queue
= 8; /* free queue */
176 queue
= 1; /* MCU queue */
179 mt7603_wtbl_set_skip_tx(dev
, idx
, true);
181 mt76_wr(dev
, MT_TX_ABORT
, MT_TX_ABORT_EN
|
182 FIELD_PREP(MT_TX_ABORT_WCID
, idx
));
184 for (i
= 0; i
< 4; i
++) {
185 mt76_wr(dev
, MT_DMA_FQCR0
, MT_DMA_FQCR0_BUSY
|
186 FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID
, idx
) |
187 FIELD_PREP(MT_DMA_FQCR0_TARGET_QID
, i
) |
188 FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID
, port
) |
189 FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID
, queue
));
191 WARN_ON_ONCE(!mt76_poll(dev
, MT_DMA_FQCR0
, MT_DMA_FQCR0_BUSY
,
195 mt76_wr(dev
, MT_TX_ABORT
, 0);
197 mt7603_wtbl_set_skip_tx(dev
, idx
, false);
200 void mt7603_wtbl_set_smps(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
203 u32 addr
= mt7603_wtbl1_addr(sta
->wcid
.idx
);
205 if (sta
->smps
== enabled
)
208 mt76_rmw_field(dev
, addr
+ 2 * 4, MT_WTBL1_W2_SMPS
, enabled
);
212 void mt7603_wtbl_set_ps(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
215 int idx
= sta
->wcid
.idx
;
218 spin_lock_bh(&dev
->ps_lock
);
220 if (sta
->ps
== enabled
)
223 mt76_wr(dev
, MT_PSE_RTA
,
224 FIELD_PREP(MT_PSE_RTA_TAG_ID
, idx
) |
225 FIELD_PREP(MT_PSE_RTA_PORT_ID
, 0) |
226 FIELD_PREP(MT_PSE_RTA_QUEUE_ID
, 1) |
227 FIELD_PREP(MT_PSE_RTA_REDIRECT_EN
, enabled
) |
228 MT_PSE_RTA_WRITE
| MT_PSE_RTA_BUSY
);
230 mt76_poll(dev
, MT_PSE_RTA
, MT_PSE_RTA_BUSY
, 0, 5000);
233 mt7603_filter_tx(dev
, idx
, false);
235 addr
= mt7603_wtbl1_addr(idx
);
236 mt76_set(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
237 mt76_rmw(dev
, addr
+ 3 * 4, MT_WTBL1_W3_POWER_SAVE
,
238 enabled
* MT_WTBL1_W3_POWER_SAVE
);
239 mt76_clear(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
243 spin_unlock_bh(&dev
->ps_lock
);
246 void mt7603_wtbl_clear(struct mt7603_dev
*dev
, int idx
)
248 int wtbl2_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL2_SIZE
;
249 int wtbl2_frame
= idx
/ wtbl2_frame_size
;
250 int wtbl2_entry
= idx
% wtbl2_frame_size
;
252 int wtbl3_base_frame
= MT_WTBL3_OFFSET
/ MT_PSE_PAGE_SIZE
;
253 int wtbl3_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL3_SIZE
;
254 int wtbl3_frame
= wtbl3_base_frame
+ idx
/ wtbl3_frame_size
;
255 int wtbl3_entry
= (idx
% wtbl3_frame_size
) * 2;
257 int wtbl4_base_frame
= MT_WTBL4_OFFSET
/ MT_PSE_PAGE_SIZE
;
258 int wtbl4_frame_size
= MT_PSE_PAGE_SIZE
/ MT_WTBL4_SIZE
;
259 int wtbl4_frame
= wtbl4_base_frame
+ idx
/ wtbl4_frame_size
;
260 int wtbl4_entry
= idx
% wtbl4_frame_size
;
262 u32 addr
= MT_WTBL1_BASE
+ idx
* MT_WTBL1_SIZE
;
265 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
267 mt76_wr(dev
, addr
+ 0 * 4,
268 MT_WTBL1_W0_RX_CHECK_A1
|
269 MT_WTBL1_W0_RX_CHECK_A2
|
270 MT_WTBL1_W0_RX_VALID
);
271 mt76_wr(dev
, addr
+ 1 * 4, 0);
272 mt76_wr(dev
, addr
+ 2 * 4, 0);
274 mt76_set(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
276 mt76_wr(dev
, addr
+ 3 * 4,
277 FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID
, wtbl2_frame
) |
278 FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID
, wtbl2_entry
) |
279 FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID
, wtbl4_frame
) |
280 MT_WTBL1_W3_I_PSM
| MT_WTBL1_W3_KEEP_I_PSM
);
281 mt76_wr(dev
, addr
+ 4 * 4,
282 FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID
, wtbl3_frame
) |
283 FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID
, wtbl3_entry
) |
284 FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID
, wtbl4_entry
));
286 mt76_clear(dev
, MT_WTBL1_OR
, MT_WTBL1_OR_PSM_WRITE
);
288 addr
= mt7603_wtbl2_addr(idx
);
290 /* Clear BA information */
291 mt76_wr(dev
, addr
+ (15 * 4), 0);
293 mt76_stop_tx_ac(dev
, GENMASK(3, 0));
294 for (i
= 2; i
<= 4; i
++)
295 mt76_wr(dev
, addr
+ (i
* 4), 0);
296 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_WTBL2
);
297 mt76_start_tx_ac(dev
, GENMASK(3, 0));
299 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_RX_COUNT_CLEAR
);
300 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_TX_COUNT_CLEAR
);
301 mt7603_wtbl_update(dev
, idx
, MT_WTBL_UPDATE_ADM_COUNT_CLEAR
);
304 void mt7603_wtbl_update_cap(struct mt7603_dev
*dev
, struct ieee80211_sta
*sta
)
306 struct mt7603_sta
*msta
= (struct mt7603_sta
*)sta
->drv_priv
;
307 int idx
= msta
->wcid
.idx
;
311 addr
= mt7603_wtbl1_addr(idx
);
313 val
= mt76_rr(dev
, addr
+ 2 * 4);
314 val
&= MT_WTBL1_W2_KEY_TYPE
| MT_WTBL1_W2_ADMISSION_CONTROL
;
315 val
|= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR
, sta
->ht_cap
.ampdu_factor
) |
316 FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY
, sta
->ht_cap
.ampdu_density
) |
317 MT_WTBL1_W2_TXS_BAF_REPORT
;
320 val
|= MT_WTBL1_W2_HT
;
321 if (sta
->vht_cap
.cap
)
322 val
|= MT_WTBL1_W2_VHT
;
324 mt76_wr(dev
, addr
+ 2 * 4, val
);
326 addr
= mt7603_wtbl2_addr(idx
);
327 val
= mt76_rr(dev
, addr
+ 9 * 4);
328 val
&= ~(MT_WTBL2_W9_SHORT_GI_20
| MT_WTBL2_W9_SHORT_GI_40
|
329 MT_WTBL2_W9_SHORT_GI_80
);
330 if (sta
->ht_cap
.cap
& IEEE80211_HT_CAP_SGI_20
)
331 val
|= MT_WTBL2_W9_SHORT_GI_20
;
332 if (sta
->ht_cap
.cap
& IEEE80211_HT_CAP_SGI_40
)
333 val
|= MT_WTBL2_W9_SHORT_GI_40
;
334 mt76_wr(dev
, addr
+ 9 * 4, val
);
337 void mt7603_mac_rx_ba_reset(struct mt7603_dev
*dev
, void *addr
, u8 tid
)
339 mt76_wr(dev
, MT_BA_CONTROL_0
, get_unaligned_le32(addr
));
340 mt76_wr(dev
, MT_BA_CONTROL_1
,
341 (get_unaligned_le16(addr
+ 4) |
342 FIELD_PREP(MT_BA_CONTROL_1_TID
, tid
) |
343 MT_BA_CONTROL_1_RESET
));
346 void mt7603_mac_tx_ba_reset(struct mt7603_dev
*dev
, int wcid
, int tid
, int ssn
,
349 u32 addr
= mt7603_wtbl2_addr(wcid
);
350 u32 tid_mask
= FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS
, BIT(tid
)) |
351 (MT_WTBL2_W15_BA_WIN_SIZE
<<
352 (tid
* MT_WTBL2_W15_BA_WIN_SIZE_SHIFT
));
358 mt76_clear(dev
, addr
+ (15 * 4), tid_mask
);
361 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
363 mt7603_mac_stop(dev
);
366 mt76_rmw_field(dev
, addr
+ (2 * 4), MT_WTBL2_W2_TID0_SN
, ssn
);
369 mt76_rmw_field(dev
, addr
+ (2 * 4), MT_WTBL2_W2_TID1_SN
, ssn
);
372 mt76_rmw_field(dev
, addr
+ (2 * 4), MT_WTBL2_W2_TID2_SN_LO
,
374 mt76_rmw_field(dev
, addr
+ (3 * 4), MT_WTBL2_W3_TID2_SN_HI
,
378 mt76_rmw_field(dev
, addr
+ (3 * 4), MT_WTBL2_W3_TID3_SN
, ssn
);
381 mt76_rmw_field(dev
, addr
+ (3 * 4), MT_WTBL2_W3_TID4_SN
, ssn
);
384 mt76_rmw_field(dev
, addr
+ (3 * 4), MT_WTBL2_W3_TID5_SN_LO
,
386 mt76_rmw_field(dev
, addr
+ (4 * 4), MT_WTBL2_W4_TID5_SN_HI
,
390 mt76_rmw_field(dev
, addr
+ (4 * 4), MT_WTBL2_W4_TID6_SN
, ssn
);
393 mt76_rmw_field(dev
, addr
+ (4 * 4), MT_WTBL2_W4_TID7_SN
, ssn
);
396 mt7603_wtbl_update(dev
, wcid
, MT_WTBL_UPDATE_WTBL2
);
397 mt7603_mac_start(dev
);
399 for (i
= 7; i
> 0; i
--) {
400 if (ba_size
>= MT_AGG_SIZE_LIMIT(i
))
404 tid_val
= FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS
, BIT(tid
)) |
405 i
<< (tid
* MT_WTBL2_W15_BA_WIN_SIZE_SHIFT
);
407 mt76_rmw(dev
, addr
+ (15 * 4), tid_mask
, tid_val
);
411 mt7603_get_rate(struct mt7603_dev
*dev
, struct ieee80211_supported_band
*sband
,
415 int len
= sband
->n_bitrates
;
419 if (sband
== &dev
->mt76
.sband_5g
.sband
)
422 idx
&= ~BIT(2); /* short preamble */
423 } else if (sband
== &dev
->mt76
.sband_2g
.sband
) {
427 for (i
= offset
; i
< len
; i
++) {
428 if ((sband
->bitrates
[i
].hw_value
& GENMASK(7, 0)) == idx
)
435 static struct mt76_wcid
*
436 mt7603_rx_get_wcid(struct mt7603_dev
*dev
, u8 idx
, bool unicast
)
438 struct mt7603_sta
*sta
;
439 struct mt76_wcid
*wcid
;
441 if (idx
>= ARRAY_SIZE(dev
->mt76
.wcid
))
444 wcid
= rcu_dereference(dev
->mt76
.wcid
[idx
]);
445 if (unicast
|| !wcid
)
451 sta
= container_of(wcid
, struct mt7603_sta
, wcid
);
455 return &sta
->vif
->sta
.wcid
;
459 mt7603_insert_ccmp_hdr(struct sk_buff
*skb
, u8 key_id
)
461 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
462 int hdr_len
= ieee80211_get_hdrlen_from_skb(skb
);
467 memmove(skb
->data
, skb
->data
+ 8, hdr_len
);
468 hdr
= skb
->data
+ hdr_len
;
473 hdr
[3] = 0x20 | (key_id
<< 6);
479 status
->flag
&= ~RX_FLAG_IV_STRIPPED
;
483 mt7603_mac_fill_rx(struct mt7603_dev
*dev
, struct sk_buff
*skb
)
485 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
486 struct ieee80211_supported_band
*sband
;
487 struct ieee80211_hdr
*hdr
;
488 __le32
*rxd
= (__le32
*)skb
->data
;
489 u32 rxd0
= le32_to_cpu(rxd
[0]);
490 u32 rxd1
= le32_to_cpu(rxd
[1]);
491 u32 rxd2
= le32_to_cpu(rxd
[2]);
492 bool unicast
= rxd1
& MT_RXD1_NORMAL_U2M
;
493 bool insert_ccmp_hdr
= false;
498 memset(status
, 0, sizeof(*status
));
500 i
= FIELD_GET(MT_RXD1_NORMAL_CH_FREQ
, rxd1
);
501 sband
= (i
& 1) ? &dev
->mt76
.sband_5g
.sband
: &dev
->mt76
.sband_2g
.sband
;
504 idx
= FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX
, rxd2
);
505 status
->wcid
= mt7603_rx_get_wcid(dev
, idx
, unicast
);
507 status
->band
= sband
->band
;
508 if (i
< sband
->n_channels
)
509 status
->freq
= sband
->channels
[i
].center_freq
;
511 if (rxd2
& MT_RXD2_NORMAL_FCS_ERR
)
512 status
->flag
|= RX_FLAG_FAILED_FCS_CRC
;
514 if (rxd2
& MT_RXD2_NORMAL_TKIP_MIC_ERR
)
515 status
->flag
|= RX_FLAG_MMIC_ERROR
;
517 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE
, rxd2
) != 0 &&
518 !(rxd2
& (MT_RXD2_NORMAL_CLM
| MT_RXD2_NORMAL_CM
))) {
519 status
->flag
|= RX_FLAG_DECRYPTED
;
520 status
->flag
|= RX_FLAG_IV_STRIPPED
;
521 status
->flag
|= RX_FLAG_MMIC_STRIPPED
| RX_FLAG_MIC_STRIPPED
;
524 remove_pad
= rxd1
& MT_RXD1_NORMAL_HDR_OFFSET
;
526 if (rxd2
& MT_RXD2_NORMAL_MAX_LEN_ERROR
)
529 if (!sband
->channels
)
533 if (rxd0
& MT_RXD0_NORMAL_GROUP_4
) {
535 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
538 if (rxd0
& MT_RXD0_NORMAL_GROUP_1
) {
539 u8
*data
= (u8
*)rxd
;
541 if (status
->flag
& RX_FLAG_DECRYPTED
) {
542 status
->iv
[0] = data
[5];
543 status
->iv
[1] = data
[4];
544 status
->iv
[2] = data
[3];
545 status
->iv
[3] = data
[2];
546 status
->iv
[4] = data
[1];
547 status
->iv
[5] = data
[0];
549 insert_ccmp_hdr
= FIELD_GET(MT_RXD2_NORMAL_FRAG
, rxd2
);
553 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
556 if (rxd0
& MT_RXD0_NORMAL_GROUP_2
) {
558 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
561 if (rxd0
& MT_RXD0_NORMAL_GROUP_3
) {
562 u32 rxdg0
= le32_to_cpu(rxd
[0]);
563 u32 rxdg3
= le32_to_cpu(rxd
[3]);
566 i
= FIELD_GET(MT_RXV1_TX_RATE
, rxdg0
);
567 switch (FIELD_GET(MT_RXV1_TX_MODE
, rxdg0
)) {
568 case MT_PHY_TYPE_CCK
:
571 case MT_PHY_TYPE_OFDM
:
572 i
= mt7603_get_rate(dev
, sband
, i
, cck
);
574 case MT_PHY_TYPE_HT_GF
:
576 status
->encoding
= RX_ENC_HT
;
584 if (rxdg0
& MT_RXV1_HT_SHORT_GI
)
585 status
->enc_flags
|= RX_ENC_FLAG_SHORT_GI
;
586 if (rxdg0
& MT_RXV1_HT_AD_CODE
)
587 status
->enc_flags
|= RX_ENC_FLAG_LDPC
;
589 status
->enc_flags
|= RX_ENC_FLAG_STBC_MASK
*
590 FIELD_GET(MT_RXV1_HT_STBC
, rxdg0
);
592 status
->rate_idx
= i
;
594 status
->chains
= dev
->mt76
.antenna_mask
;
595 status
->chain_signal
[0] = FIELD_GET(MT_RXV4_IB_RSSI0
, rxdg3
) +
597 status
->chain_signal
[1] = FIELD_GET(MT_RXV4_IB_RSSI1
, rxdg3
) +
600 status
->signal
= status
->chain_signal
[0];
601 if (status
->chains
& BIT(1))
602 status
->signal
= max(status
->signal
,
603 status
->chain_signal
[1]);
605 if (FIELD_GET(MT_RXV1_FRAME_MODE
, rxdg0
) == 1)
606 status
->bw
= RATE_INFO_BW_40
;
609 if ((u8
*)rxd
- skb
->data
>= skb
->len
)
615 skb_pull(skb
, (u8
*)rxd
- skb
->data
+ 2 * remove_pad
);
617 if (insert_ccmp_hdr
) {
618 u8 key_id
= FIELD_GET(MT_RXD1_NORMAL_KEY_ID
, rxd1
);
620 mt7603_insert_ccmp_hdr(skb
, key_id
);
623 hdr
= (struct ieee80211_hdr
*)skb
->data
;
624 if (!status
->wcid
|| !ieee80211_is_data_qos(hdr
->frame_control
))
627 status
->aggr
= unicast
&&
628 !ieee80211_is_qos_nullfunc(hdr
->frame_control
);
629 status
->tid
= *ieee80211_get_qos_ctl(hdr
) & IEEE80211_QOS_CTL_TID_MASK
;
630 status
->seqno
= hdr
->seq_ctrl
>> 4;
636 mt7603_mac_tx_rate_val(struct mt7603_dev
*dev
,
637 const struct ieee80211_tx_rate
*rate
, bool stbc
, u8
*bw
)
639 u8 phy
, nss
, rate_idx
;
643 if (rate
->flags
& IEEE80211_TX_RC_MCS
) {
644 rate_idx
= rate
->idx
;
645 nss
= 1 + (rate
->idx
>> 3);
646 phy
= MT_PHY_TYPE_HT
;
647 if (rate
->flags
& IEEE80211_TX_RC_GREEN_FIELD
)
648 phy
= MT_PHY_TYPE_HT_GF
;
649 if (rate
->flags
& IEEE80211_TX_RC_40_MHZ_WIDTH
)
652 const struct ieee80211_rate
*r
;
653 int band
= dev
->mt76
.chandef
.chan
->band
;
657 r
= &mt76_hw(dev
)->wiphy
->bands
[band
]->bitrates
[rate
->idx
];
658 if (rate
->flags
& IEEE80211_TX_RC_USE_SHORT_PREAMBLE
)
659 val
= r
->hw_value_short
;
664 rate_idx
= val
& 0xff;
667 rateval
= (FIELD_PREP(MT_TX_RATE_IDX
, rate_idx
) |
668 FIELD_PREP(MT_TX_RATE_MODE
, phy
));
670 if (stbc
&& nss
== 1)
671 rateval
|= MT_TX_RATE_STBC
;
676 void mt7603_wtbl_set_rates(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
677 struct ieee80211_tx_rate
*probe_rate
,
678 struct ieee80211_tx_rate
*rates
)
680 int wcid
= sta
->wcid
.idx
;
681 u32 addr
= mt7603_wtbl2_addr(wcid
);
683 int n_rates
= sta
->n_rates
;
684 u8 bw
, bw_prev
, bw_idx
= 0;
687 u32 w9
= mt76_rr(dev
, addr
+ 9 * 4);
690 if (!mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000))
693 for (i
= n_rates
; i
< 4; i
++)
694 rates
[i
] = rates
[n_rates
- 1];
696 w9
&= MT_WTBL2_W9_SHORT_GI_20
| MT_WTBL2_W9_SHORT_GI_40
|
697 MT_WTBL2_W9_SHORT_GI_80
;
699 val
[0] = mt7603_mac_tx_rate_val(dev
, &rates
[0], stbc
, &bw
);
703 probe_val
= mt7603_mac_tx_rate_val(dev
, probe_rate
, stbc
, &bw
);
712 w9
|= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL
, bw
);
713 w9
|= FIELD_PREP(MT_WTBL2_W9_BW_CAP
, bw
);
715 val
[1] = mt7603_mac_tx_rate_val(dev
, &rates
[1], stbc
, &bw
);
721 val
[2] = mt7603_mac_tx_rate_val(dev
, &rates
[2], stbc
, &bw
);
727 val
[3] = mt7603_mac_tx_rate_val(dev
, &rates
[3], stbc
, &bw
);
731 w9
|= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE
,
732 bw_idx
? bw_idx
- 1 : 7);
734 mt76_wr(dev
, MT_WTBL_RIUCR0
, w9
);
736 mt76_wr(dev
, MT_WTBL_RIUCR1
,
737 FIELD_PREP(MT_WTBL_RIUCR1_RATE0
, probe_val
) |
738 FIELD_PREP(MT_WTBL_RIUCR1_RATE1
, val
[0]) |
739 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO
, val
[0]));
741 mt76_wr(dev
, MT_WTBL_RIUCR2
,
742 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI
, val
[0] >> 8) |
743 FIELD_PREP(MT_WTBL_RIUCR2_RATE3
, val
[1]) |
744 FIELD_PREP(MT_WTBL_RIUCR2_RATE4
, val
[1]) |
745 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO
, val
[2]));
747 mt76_wr(dev
, MT_WTBL_RIUCR3
,
748 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI
, val
[2] >> 4) |
749 FIELD_PREP(MT_WTBL_RIUCR3_RATE6
, val
[2]) |
750 FIELD_PREP(MT_WTBL_RIUCR3_RATE7
, val
[3]));
752 mt76_wr(dev
, MT_WTBL_UPDATE
,
753 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX
, wcid
) |
754 MT_WTBL_UPDATE_RATE_UPDATE
|
755 MT_WTBL_UPDATE_TX_COUNT_CLEAR
);
757 if (!sta
->wcid
.tx_rate_set
)
758 mt76_poll(dev
, MT_WTBL_UPDATE
, MT_WTBL_UPDATE_BUSY
, 0, 5000);
760 sta
->rate_count
= 2 * MT7603_RATE_RETRY
* n_rates
;
761 sta
->wcid
.tx_rate_set
= true;
764 static enum mt7603_cipher_type
765 mt7603_mac_get_key_info(struct ieee80211_key_conf
*key
, u8
*key_data
)
767 memset(key_data
, 0, 32);
769 return MT_CIPHER_NONE
;
771 if (key
->keylen
> 32)
772 return MT_CIPHER_NONE
;
774 memcpy(key_data
, key
->key
, key
->keylen
);
776 switch (key
->cipher
) {
777 case WLAN_CIPHER_SUITE_WEP40
:
778 return MT_CIPHER_WEP40
;
779 case WLAN_CIPHER_SUITE_WEP104
:
780 return MT_CIPHER_WEP104
;
781 case WLAN_CIPHER_SUITE_TKIP
:
782 /* Rx/Tx MIC keys are swapped */
783 memcpy(key_data
+ 16, key
->key
+ 24, 8);
784 memcpy(key_data
+ 24, key
->key
+ 16, 8);
785 return MT_CIPHER_TKIP
;
786 case WLAN_CIPHER_SUITE_CCMP
:
787 return MT_CIPHER_AES_CCMP
;
789 return MT_CIPHER_NONE
;
793 int mt7603_wtbl_set_key(struct mt7603_dev
*dev
, int wcid
,
794 struct ieee80211_key_conf
*key
)
796 enum mt7603_cipher_type cipher
;
797 u32 addr
= mt7603_wtbl3_addr(wcid
);
799 int key_len
= sizeof(key_data
);
801 cipher
= mt7603_mac_get_key_info(key
, key_data
);
802 if (cipher
== MT_CIPHER_NONE
&& key
)
805 if (key
&& (cipher
== MT_CIPHER_WEP40
|| cipher
== MT_CIPHER_WEP104
)) {
806 addr
+= key
->keyidx
* 16;
810 mt76_wr_copy(dev
, addr
, key_data
, key_len
);
812 addr
= mt7603_wtbl1_addr(wcid
);
813 mt76_rmw_field(dev
, addr
+ 2 * 4, MT_WTBL1_W2_KEY_TYPE
, cipher
);
815 mt76_rmw_field(dev
, addr
, MT_WTBL1_W0_KEY_IDX
, key
->keyidx
);
816 mt76_rmw_field(dev
, addr
, MT_WTBL1_W0_RX_KEY_VALID
, !!key
);
822 mt7603_mac_write_txwi(struct mt7603_dev
*dev
, __le32
*txwi
,
823 struct sk_buff
*skb
, struct mt76_queue
*q
,
824 struct mt76_wcid
*wcid
, struct ieee80211_sta
*sta
,
825 int pid
, struct ieee80211_key_conf
*key
)
827 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
828 struct ieee80211_tx_rate
*rate
= &info
->control
.rates
[0];
829 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
830 struct ieee80211_vif
*vif
= info
->control
.vif
;
831 struct mt7603_vif
*mvif
;
833 int hdr_len
= ieee80211_get_hdrlen_from_skb(skb
);
835 u8 frame_type
, frame_subtype
;
836 u16 fc
= le16_to_cpu(hdr
->frame_control
);
842 mvif
= (struct mt7603_vif
*)vif
->drv_priv
;
844 if (vif_idx
&& q
>= &dev
->mt76
.q_tx
[MT_TXQ_BEACON
])
849 struct mt7603_sta
*msta
= (struct mt7603_sta
*)sta
->drv_priv
;
851 tx_count
= msta
->rate_count
;
855 wlan_idx
= wcid
->idx
;
857 wlan_idx
= MT7603_WTBL_RESERVED
;
859 frame_type
= (fc
& IEEE80211_FCTL_FTYPE
) >> 2;
860 frame_subtype
= (fc
& IEEE80211_FCTL_STYPE
) >> 4;
862 val
= FIELD_PREP(MT_TXD0_TX_BYTES
, skb
->len
+ MT_TXD_SIZE
) |
863 FIELD_PREP(MT_TXD0_Q_IDX
, q
->hw_idx
);
864 txwi
[0] = cpu_to_le32(val
);
866 val
= MT_TXD1_LONG_FORMAT
|
867 FIELD_PREP(MT_TXD1_OWN_MAC
, vif_idx
) |
868 FIELD_PREP(MT_TXD1_TID
,
869 skb
->priority
& IEEE80211_QOS_CTL_TID_MASK
) |
870 FIELD_PREP(MT_TXD1_HDR_FORMAT
, MT_HDR_FORMAT_802_11
) |
871 FIELD_PREP(MT_TXD1_HDR_INFO
, hdr_len
/ 2) |
872 FIELD_PREP(MT_TXD1_WLAN_IDX
, wlan_idx
) |
873 FIELD_PREP(MT_TXD1_PROTECTED
, !!key
);
874 txwi
[1] = cpu_to_le32(val
);
876 if (info
->flags
& IEEE80211_TX_CTL_NO_ACK
)
877 txwi
[1] |= cpu_to_le32(MT_TXD1_NO_ACK
);
879 val
= FIELD_PREP(MT_TXD2_FRAME_TYPE
, frame_type
) |
880 FIELD_PREP(MT_TXD2_SUB_TYPE
, frame_subtype
) |
881 FIELD_PREP(MT_TXD2_MULTICAST
,
882 is_multicast_ether_addr(hdr
->addr1
));
883 txwi
[2] = cpu_to_le32(val
);
885 if (!(info
->flags
& IEEE80211_TX_CTL_AMPDU
))
886 txwi
[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE
);
890 val
= MT_TXD5_TX_STATUS_HOST
| MT_TXD5_SW_POWER_MGMT
|
891 FIELD_PREP(MT_TXD5_PID
, pid
);
892 txwi
[5] = cpu_to_le32(val
);
896 if (rate
->idx
>= 0 && rate
->count
&&
897 !(info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
)) {
898 bool stbc
= info
->flags
& IEEE80211_TX_CTL_STBC
;
899 u16 rateval
= mt7603_mac_tx_rate_val(dev
, rate
, stbc
, &bw
);
901 txwi
[2] |= cpu_to_le32(MT_TXD2_FIX_RATE
);
903 val
= MT_TXD6_FIXED_BW
|
904 FIELD_PREP(MT_TXD6_BW
, bw
) |
905 FIELD_PREP(MT_TXD6_TX_RATE
, rateval
);
906 txwi
[6] |= cpu_to_le32(val
);
908 if (rate
->flags
& IEEE80211_TX_RC_SHORT_GI
)
909 txwi
[6] |= cpu_to_le32(MT_TXD6_SGI
);
911 if (!(rate
->flags
& IEEE80211_TX_RC_MCS
))
912 txwi
[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE
);
914 tx_count
= rate
->count
;
917 /* use maximum tx count for beacons and buffered multicast */
918 if (q
>= &dev
->mt76
.q_tx
[MT_TXQ_BEACON
])
921 val
= FIELD_PREP(MT_TXD3_REM_TX_COUNT
, tx_count
) |
922 FIELD_PREP(MT_TXD3_SEQ
, le16_to_cpu(hdr
->seq_ctrl
));
923 txwi
[3] = cpu_to_le32(val
);
926 u64 pn
= atomic64_inc_return(&key
->tx_pn
);
928 txwi
[3] |= cpu_to_le32(MT_TXD3_PN_VALID
);
929 txwi
[4] = cpu_to_le32(pn
& GENMASK(31, 0));
930 txwi
[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH
, pn
>> 32));
938 int mt7603_tx_prepare_skb(struct mt76_dev
*mdev
, void *txwi_ptr
,
939 struct sk_buff
*skb
, struct mt76_queue
*q
,
940 struct mt76_wcid
*wcid
, struct ieee80211_sta
*sta
,
943 struct mt7603_dev
*dev
= container_of(mdev
, struct mt7603_dev
, mt76
);
944 struct mt7603_sta
*msta
= container_of(wcid
, struct mt7603_sta
, wcid
);
945 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
946 struct ieee80211_key_conf
*key
= info
->control
.hw_key
;
950 wcid
= &dev
->global_sta
.wcid
;
953 msta
= (struct mt7603_sta
*)sta
->drv_priv
;
955 if ((info
->flags
& (IEEE80211_TX_CTL_NO_PS_BUFFER
|
956 IEEE80211_TX_CTL_CLEAR_PS_FILT
)) ||
957 (info
->control
.flags
& IEEE80211_TX_CTRL_PS_RESPONSE
))
958 mt7603_wtbl_set_ps(dev
, msta
, false);
961 pid
= mt76_tx_status_skb_add(mdev
, wcid
, skb
);
963 if (info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
) {
964 spin_lock_bh(&dev
->mt76
.lock
);
965 msta
->rate_probe
= true;
966 mt7603_wtbl_set_rates(dev
, msta
, &info
->control
.rates
[0],
968 spin_unlock_bh(&dev
->mt76
.lock
);
971 mt7603_mac_write_txwi(dev
, txwi_ptr
, skb
, q
, wcid
, sta
, pid
, key
);
977 mt7603_fill_txs(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
,
978 struct ieee80211_tx_info
*info
, __le32
*txs_data
)
980 struct ieee80211_supported_band
*sband
;
983 u32 final_rate_flags
;
996 fixed_rate
= info
->status
.rates
[0].count
;
997 probe
= !!(info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
);
999 txs
= le32_to_cpu(txs_data
[4]);
1000 final_mpdu
= txs
& MT_TXS4_ACKED_MPDU
;
1001 ampdu
= !fixed_rate
&& (txs
& MT_TXS4_AMPDU
);
1002 pid
= FIELD_GET(MT_TXS4_PID
, txs
);
1003 count
= FIELD_GET(MT_TXS4_TX_COUNT
, txs
);
1005 txs
= le32_to_cpu(txs_data
[0]);
1006 final_rate
= FIELD_GET(MT_TXS0_TX_RATE
, txs
);
1007 ack_timeout
= txs
& MT_TXS0_ACK_TIMEOUT
;
1009 if (!ampdu
&& (txs
& MT_TXS0_RTS_TIMEOUT
))
1012 if (txs
& MT_TXS0_QUEUE_TIMEOUT
)
1016 info
->flags
|= IEEE80211_TX_STAT_ACK
;
1018 info
->status
.ampdu_len
= 1;
1019 info
->status
.ampdu_ack_len
= !!(info
->flags
&
1020 IEEE80211_TX_STAT_ACK
);
1022 if (ampdu
|| (info
->flags
& IEEE80211_TX_CTL_AMPDU
))
1023 info
->flags
|= IEEE80211_TX_STAT_AMPDU
| IEEE80211_TX_CTL_AMPDU
;
1025 if (fixed_rate
&& !probe
) {
1026 info
->status
.rates
[0].count
= count
;
1030 for (i
= 0, idx
= 0; i
< ARRAY_SIZE(info
->status
.rates
); i
++) {
1031 int cur_count
= min_t(int, count
, 2 * MT7603_RATE_RETRY
);
1036 info
->status
.rates
[i
] = sta
->rates
[idx
];
1040 if (i
&& info
->status
.rates
[i
].idx
< 0) {
1041 info
->status
.rates
[i
- 1].count
+= count
;
1046 info
->status
.rates
[i
].idx
= -1;
1050 info
->status
.rates
[i
].count
= cur_count
;
1056 final_rate_flags
= info
->status
.rates
[final_idx
].flags
;
1058 switch (FIELD_GET(MT_TX_RATE_MODE
, final_rate
)) {
1059 case MT_PHY_TYPE_CCK
:
1062 case MT_PHY_TYPE_OFDM
:
1063 if (dev
->mt76
.chandef
.chan
->band
== NL80211_BAND_5GHZ
)
1064 sband
= &dev
->mt76
.sband_5g
.sband
;
1066 sband
= &dev
->mt76
.sband_2g
.sband
;
1067 final_rate
&= GENMASK(5, 0);
1068 final_rate
= mt7603_get_rate(dev
, sband
, final_rate
, cck
);
1069 final_rate_flags
= 0;
1071 case MT_PHY_TYPE_HT_GF
:
1072 case MT_PHY_TYPE_HT
:
1073 final_rate_flags
|= IEEE80211_TX_RC_MCS
;
1074 final_rate
&= GENMASK(5, 0);
1075 if (final_rate
> 15)
1082 info
->status
.rates
[final_idx
].idx
= final_rate
;
1083 info
->status
.rates
[final_idx
].flags
= final_rate_flags
;
1089 mt7603_mac_add_txs_skb(struct mt7603_dev
*dev
, struct mt7603_sta
*sta
, int pid
,
1092 struct mt76_dev
*mdev
= &dev
->mt76
;
1093 struct sk_buff_head list
;
1094 struct sk_buff
*skb
;
1096 if (pid
< MT_PACKET_ID_FIRST
)
1099 mt76_tx_status_lock(mdev
, &list
);
1100 skb
= mt76_tx_status_skb_get(mdev
, &sta
->wcid
, pid
, &list
);
1102 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1104 if (info
->flags
& IEEE80211_TX_CTL_RATE_CTRL_PROBE
) {
1105 spin_lock_bh(&dev
->mt76
.lock
);
1106 if (sta
->rate_probe
) {
1107 mt7603_wtbl_set_rates(dev
, sta
, NULL
,
1109 sta
->rate_probe
= false;
1111 spin_unlock_bh(&dev
->mt76
.lock
);
1114 if (!mt7603_fill_txs(dev
, sta
, info
, txs_data
)) {
1115 ieee80211_tx_info_clear_status(info
);
1116 info
->status
.rates
[0].idx
= -1;
1119 mt76_tx_status_skb_done(mdev
, skb
, &list
);
1121 mt76_tx_status_unlock(mdev
, &list
);
1126 void mt7603_mac_add_txs(struct mt7603_dev
*dev
, void *data
)
1128 struct ieee80211_tx_info info
= {};
1129 struct ieee80211_sta
*sta
= NULL
;
1130 struct mt7603_sta
*msta
= NULL
;
1131 struct mt76_wcid
*wcid
;
1132 __le32
*txs_data
= data
;
1137 txs
= le32_to_cpu(txs_data
[4]);
1138 pid
= FIELD_GET(MT_TXS4_PID
, txs
);
1139 txs
= le32_to_cpu(txs_data
[3]);
1140 wcidx
= FIELD_GET(MT_TXS3_WCID
, txs
);
1142 if (pid
== MT_PACKET_ID_NO_ACK
)
1145 if (wcidx
>= ARRAY_SIZE(dev
->mt76
.wcid
))
1150 wcid
= rcu_dereference(dev
->mt76
.wcid
[wcidx
]);
1154 msta
= container_of(wcid
, struct mt7603_sta
, wcid
);
1155 sta
= wcid_to_sta(wcid
);
1157 if (mt7603_mac_add_txs_skb(dev
, msta
, pid
, txs_data
))
1160 if (wcidx
>= MT7603_WTBL_STA
|| !sta
)
1163 if (mt7603_fill_txs(dev
, msta
, &info
, txs_data
))
1164 ieee80211_tx_status_noskb(mt76_hw(dev
), sta
, &info
);
1170 void mt7603_tx_complete_skb(struct mt76_dev
*mdev
, struct mt76_queue
*q
,
1171 struct mt76_queue_entry
*e
, bool flush
)
1173 struct mt7603_dev
*dev
= container_of(mdev
, struct mt7603_dev
, mt76
);
1174 struct sk_buff
*skb
= e
->skb
;
1177 dev_kfree_skb_any(skb
);
1181 if (q
- dev
->mt76
.q_tx
< 4)
1182 dev
->tx_hang_check
= 0;
1184 mt76_tx_complete_skb(mdev
, skb
);
1188 wait_for_wpdma(struct mt7603_dev
*dev
)
1190 return mt76_poll(dev
, MT_WPDMA_GLO_CFG
,
1191 MT_WPDMA_GLO_CFG_TX_DMA_BUSY
|
1192 MT_WPDMA_GLO_CFG_RX_DMA_BUSY
,
1196 static void mt7603_pse_reset(struct mt7603_dev
*dev
)
1198 /* Clear previous reset result */
1199 if (!dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
])
1200 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE_S
);
1203 mt76_set(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE
);
1205 if (!mt76_poll_msec(dev
, MT_MCU_DEBUG_RESET
,
1206 MT_MCU_DEBUG_RESET_PSE_S
,
1207 MT_MCU_DEBUG_RESET_PSE_S
, 500)) {
1208 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
]++;
1209 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_PSE
);
1211 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] = 0;
1212 mt76_clear(dev
, MT_MCU_DEBUG_RESET
, MT_MCU_DEBUG_RESET_QUEUES
);
1215 if (dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] >= 3)
1216 dev
->reset_cause
[RESET_CAUSE_RESET_FAILED
] = 0;
1219 void mt7603_mac_dma_start(struct mt7603_dev
*dev
)
1221 mt7603_mac_start(dev
);
1223 wait_for_wpdma(dev
);
1224 usleep_range(50, 100);
1226 mt76_set(dev
, MT_WPDMA_GLO_CFG
,
1227 (MT_WPDMA_GLO_CFG_TX_DMA_EN
|
1228 MT_WPDMA_GLO_CFG_RX_DMA_EN
|
1229 FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE
, 3) |
1230 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE
));
1232 mt7603_irq_enable(dev
, MT_INT_RX_DONE_ALL
| MT_INT_TX_DONE_ALL
);
1235 void mt7603_mac_start(struct mt7603_dev
*dev
)
1237 mt76_clear(dev
, MT_ARB_SCR
,
1238 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
1239 mt76_wr(dev
, MT_WF_ARB_TX_START_0
, ~0);
1240 mt76_set(dev
, MT_WF_ARB_RQCR
, MT_WF_ARB_RQCR_RX_START
);
1243 void mt7603_mac_stop(struct mt7603_dev
*dev
)
1245 mt76_set(dev
, MT_ARB_SCR
,
1246 MT_ARB_SCR_TX_DISABLE
| MT_ARB_SCR_RX_DISABLE
);
1247 mt76_wr(dev
, MT_WF_ARB_TX_START_0
, 0);
1248 mt76_clear(dev
, MT_WF_ARB_RQCR
, MT_WF_ARB_RQCR_RX_START
);
1251 void mt7603_pse_client_reset(struct mt7603_dev
*dev
)
1255 addr
= mt7603_reg_map(dev
, MT_CLIENT_BASE_PHYS_ADDR
+
1256 MT_CLIENT_RESET_TX
);
1258 /* Clear previous reset state */
1259 mt76_clear(dev
, addr
,
1260 MT_CLIENT_RESET_TX_R_E_1
|
1261 MT_CLIENT_RESET_TX_R_E_2
|
1262 MT_CLIENT_RESET_TX_R_E_1_S
|
1263 MT_CLIENT_RESET_TX_R_E_2_S
);
1265 /* Start PSE client TX abort */
1266 mt76_set(dev
, addr
, MT_CLIENT_RESET_TX_R_E_1
);
1267 mt76_poll_msec(dev
, addr
, MT_CLIENT_RESET_TX_R_E_1_S
,
1268 MT_CLIENT_RESET_TX_R_E_1_S
, 500);
1270 mt76_set(dev
, addr
, MT_CLIENT_RESET_TX_R_E_2
);
1271 mt76_set(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_SW_RESET
);
1273 /* Wait for PSE client to clear TX FIFO */
1274 mt76_poll_msec(dev
, addr
, MT_CLIENT_RESET_TX_R_E_2_S
,
1275 MT_CLIENT_RESET_TX_R_E_2_S
, 500);
1277 /* Clear PSE client TX abort state */
1278 mt76_clear(dev
, addr
,
1279 MT_CLIENT_RESET_TX_R_E_1
|
1280 MT_CLIENT_RESET_TX_R_E_2
);
1283 static void mt7603_dma_sched_reset(struct mt7603_dev
*dev
)
1285 if (!is_mt7628(dev
))
1288 mt76_set(dev
, MT_SCH_4
, MT_SCH_4_RESET
);
1289 mt76_clear(dev
, MT_SCH_4
, MT_SCH_4_RESET
);
/* Full MAC/DMA recovery path, run from mt7603_mac_work() after one of
 * the watchdog checks triggered.
 *
 * Quiesces everything (mac80211 queues, tasklets, NAPI), optionally
 * resets the PSE, tears down and restarts the DMA queues, then brings
 * the interface state back up in reverse order.  The beacon timer is
 * stopped before and restored to the saved @beacon_int afterwards.
 */
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->beacon_int;
	/* save the irq mask so it can be restored after the reset */
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mt76);

	tasklet_disable(&dev->tx_tasklet);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);

	mutex_lock(&dev->mt76.mutex);

	/* disable beaconing while the hardware is being reset */
	mt7603_beacon_set_timer(dev, -1, 0);

	/* a PSE reset is needed for these causes, or when a previous
	 * reset attempt failed
	 */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	/* if the last reset failed, skip touching the DMA engine again */
	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	/* force the DMA engine to terminate any in-flight TX */
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	/* flush all TX queues and reset all RX queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mt76.state);
	mutex_unlock(&dev->mt76.mutex);

	/* restart everything that was quiesced above */
	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mt76);
}
1367 static u32
mt7603_dma_debug(struct mt7603_dev
*dev
, u8 index
)
1371 mt76_wr(dev
, MT_WPDMA_DEBUG
,
1372 FIELD_PREP(MT_WPDMA_DEBUG_IDX
, index
) |
1373 MT_WPDMA_DEBUG_SEL
);
1375 val
= mt76_rr(dev
, MT_WPDMA_DEBUG
);
1376 return FIELD_GET(MT_WPDMA_DEBUG_VALUE
, val
);
1379 static bool mt7603_rx_fifo_busy(struct mt7603_dev
*dev
)
1382 return mt7603_dma_debug(dev
, 9) & BIT(9);
1384 return mt7603_dma_debug(dev
, 2) & BIT(8);
1387 static bool mt7603_rx_dma_busy(struct mt7603_dev
*dev
)
1389 if (!(mt76_rr(dev
, MT_WPDMA_GLO_CFG
) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY
))
1392 return mt7603_rx_fifo_busy(dev
);
1395 static bool mt7603_tx_dma_busy(struct mt7603_dev
*dev
)
1399 if (!(mt76_rr(dev
, MT_WPDMA_GLO_CFG
) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY
))
1402 val
= mt7603_dma_debug(dev
, 9);
1403 return (val
& BIT(8)) && (val
& 0xf) != 0xf;
/* Detect a hung TX queue.
 *
 * For each of the first four TX queues, compare the hardware DMA index
 * against the value sampled on the previous watchdog pass
 * (dev->tx_dma_idx[]).  A queue whose DMA index has not advanced while
 * the CPU index still differs (i.e. descriptors are pending) is
 * considered hung.
 *
 * Returns true if any of the checked queues is hung.
 */
static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		/* nothing queued: this queue cannot be hung */
		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		/* remember the sample for the next watchdog pass */
		dev->tx_dma_idx[i] = dma_idx;

		/* no progress since last pass but work still pending */
		if (dma_idx == prev_dma_idx &&
		    dma_idx != ioread32(&q->regs->cpu_idx))
			break;
	}

	/* loop broke out early only on a hung queue */
	return i < 4;
}
/* Check whether the RX PSE is stuck.
 *
 * Returns true when the MCU debug register flags queue trouble, or the
 * PSE client status (read through the remapped MT_CLIENT_STATUS
 * register) matches one of the known-stuck bit patterns.  An RX FIFO
 * that is merely busy is treated as "not stuck" to avoid false
 * positives.
 *
 * NOTE(review): the 0x4001/0x8001/0xe001 masks are undocumented
 * hardware status patterns taken from the vendor driver — verify
 * against vendor reference code before changing.
 */
static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	/* FIFO still moving data: not a PSE stall */
	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	/* select the status word to sample, then read it back */
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}
/* Shared watchdog bookkeeping for one reset cause.
 *
 * @counter: per-cause consecutive-failure counter owned by the caller
 * @cause:   the reset cause being evaluated
 * @check:   optional probe; when non-NULL it reports whether the
 *           condition is currently present.  When NULL, the caller
 *           increments @counter itself and this function only compares
 *           it against the timeout.
 *
 * The counter is reset whenever the probe reports "healthy" before the
 * timeout is reached.  Returns true when a reset should be triggered,
 * in which case cur_reset_cause and the per-cause statistics are
 * updated.
 */
static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	/* test hook: force a one-shot reset for this cause
	 * (presumably set via debugfs — verify against debugfs code)
	 */
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			/* condition cleared in time: start over */
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;

trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;
	return true;
}
/* Accumulate channel busy/active time for the survey statistics.
 *
 * Reads the PSCCA busy counter from the MIB and folds it, together
 * with the wall-clock time elapsed since the last sample
 * (dev->survey_time), into the current channel's mt76_channel_state.
 * The update is done under cc_lock since the stats are also read from
 * the survey path.  No-op unless the device is running.
 */
void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;
	ktime_t cur_time;
	u32 busy;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return;

	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
	busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);

	spin_lock_bh(&dev->mt76.cc_lock);
	cur_time = ktime_get_boottime();
	state->cc_busy += busy;
	/* active time = boottime delta since the previous sample */
	state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
	dev->survey_time = cur_time;
	spin_unlock_bh(&dev->mt76.cc_lock);
}
/* Switch ED/CCA between strict and relaxed mode.
 *
 * Programs the CCA energy-detect threshold and the adjacent-channel
 * interference (ACI) threshold in RXTD(6), and the ACI threshold
 * enable bit in RXTD(13), according to whether ED monitoring is
 * enabled and whether strict mode is requested.  No-op when the
 * requested mode is already active.
 */
static void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	/* base value for RXTD(6); threshold fields OR'd in below */
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	/* relaxed ACI threshold only while monitoring in non-strict mode */
	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}
/* Periodic ED/CCA adaptation, called from mt7603_mac_work().
 *
 * Tracks two hysteresis counters:
 *  - ed_strong_signal: counts watchdog intervals in which the stronger
 *    of the two RSSI readings is >= -40 dBm,
 *  - ed_trigger: goes up while the ED busy ratio exceeds 90% of the
 *    interval, down otherwise (resetting on a direction change).
 *
 * Strict ED/CCA mode is enabled when the medium looks persistently
 * busy (or the strong-signal evidence is weak) and disabled again once
 * ed_trigger falls below -MT7603_EDCCA_BLOCK_TH.  Both counters are
 * clamped to +/-MT7603_EDCCA_BLOCK_TH.
 */
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	/* RSSI fields are 8-bit two's complement: sign-extend by hand */
	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	/* avoid dividing by zero on a degenerate interval */
	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		/* busy: reset any negative streak, then count up */
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		/* idle: reset any positive streak, then count down */
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	/* clamp the trigger counter to the hysteresis window */
	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}
/* Reset the PHY CCA statistics counters.
 *
 * Pulses the STATUS_RESET bit (set then clear) and re-enables
 * collection via STATUS_EN.  The reset-pulse-then-enable order is a
 * hardware handshake and must not be reordered.
 */
void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}
/* Program the AGC registers for the current dev->sensitivity value.
 *
 * dev->sensitivity (dBm, negative) is mapped onto one of three AGC
 * profiles depending on its range; within each profile an adjustment
 * value derived from the sensitivity is replicated into several nibble
 * positions of AGC(0).  A sensitivity of 0 (or below -100) restores
 * the calibration defaults stored in dev->agc0/dev->agc3.
 *
 * NOTE(review): the base constants (0x56f0076f etc.) are undocumented
 * calibration values from the vendor driver — verify before changing.
 */
static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	/* start from the calibrated defaults */
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		/* out of range: disable adjustment, keep defaults */
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		/* cap the highest (least sensitive) setting */
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc0 |= adj << 20;

		agc3 = 0x818181e3;
	}

	/* mirror the values into both AGC register banks */
	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}
/* Sample false-CCA counters and adapt receiver sensitivity.
 *
 * A false CCA is a PD (preamble detect) event that never produced an
 * MDRDY (valid modulation detected); the per-interval CCK and OFDM
 * deltas are stored in dev->false_cca_{cck,ofdm} and the hardware
 * counters are reset afterwards.
 *
 * Adaptation: many false CCAs (> 600) raise dev->sensitivity (making
 * the receiver less sensitive); few (< 100, or no change for 10 s)
 * lower it again.  The result is bounded by the minimum average RSSI
 * of connected stations (minus a 15 dB margin) so stations are not
 * squelched.  Finally the new value is programmed via
 * mt7603_adjust_sensitivity().
 */
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	/* restart the hardware counters for the next interval */
	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76);
	if (!min_signal) {
		/* no stations connected: fall back to defaults */
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	/* keep a margin below the weakest station's signal */
	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}
/* Periodic MAC maintenance worker, rescheduled every
 * MT7603_WATCHDOG_TIME ms.
 *
 * Each pass: flushes stale TX status, updates survey statistics, runs
 * the ED/CCA adaptation, runs the false-CCA check every 10th pass, and
 * evaluates all watchdog causes.  If any cause (or a previously failed
 * reset) fires, all watchdog counters and DMA index snapshots are
 * cleared and mt7603_mac_watchdog_reset() is invoked outside the
 * device mutex.
 */
void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mac_work.work);
	bool reset = false;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt7603_update_channel(&dev->mt76);
	mt7603_edcca_check(dev);

	/* false-CCA adaptation only every 10th pass */
	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	/* beacon-stuck and MCU-hang counters are maintained elsewhere,
	 * hence the NULL probes
	 */
	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		/* start all watchdogs from a clean slate */
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->mcu_hang = 0;
		/* invalidate DMA index snapshots used by the hang check */
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	/* the reset path takes the mutex itself: run it unlocked */
	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}