/* SPDX-License-Identifier: ISC */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"

#define MT_PSE_PAGE_SIZE	128

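/*
 * Expand a 4-bit AC bitmask into the per-AC fields of the WF_ARB TX
 * start/stop registers. Multiplying GENMASK() by !!(bit) is a branchless
 * way of selecting each field: all-ones when the AC bit is set, zero
 * otherwise. Note the fields are not evenly spaced (bits 0-3, 5-8,
 * 10-13, 16-19), so a plain shift of the mask would not work.
 */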
static u32
mt7603_ac_queue_mask0(u32 mask)
{
        u32 ret = 0;

        ret |= GENMASK(3, 0) * !!(mask & BIT(0));
        ret |= GENMASK(8, 5) * !!(mask & BIT(1));
        ret |= GENMASK(13, 10) * !!(mask & BIT(2));
        ret |= GENMASK(19, 16) * !!(mask & BIT(3));
        return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
        mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
        mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

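/*
 * Program PHY timing parameters. SIFS is 16 us on 5 GHz (OFDM only) and
 * 10 us on 2.4 GHz; each coverage class unit stretches the CCA/PLCP
 * timeouts by 3 us to accommodate larger cell sizes. TX/RX arbitration
 * is briefly disabled while the registers are updated. The CF-End frame
 * presumably falls back to an 11b rate when the long (20 us) slot time
 * indicates legacy clients.
 */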
void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
        u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
                  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
        u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
                   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
        int offset = 3 * dev->coverage_class;
        u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
                         FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
        int sifs;
        u32 val;

        if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
                sifs = 16;
        else
                sifs = 10;

        mt76_set(dev, MT_ARB_SCR,
                 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
        udelay(1);

        mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
        mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
        mt76_wr(dev, MT_IFS,
                FIELD_PREP(MT_IFS_EIFS, 360) |
                FIELD_PREP(MT_IFS_RIFS, 2) |
                FIELD_PREP(MT_IFS_SIFS, sifs) |
                FIELD_PREP(MT_IFS_SLOT, dev->slottime));

        if (dev->slottime < 20)
                val = MT7603_CFEND_RATE_DEFAULT;
        else
                val = MT7603_CFEND_RATE_11B;

        mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

        mt76_clear(dev, MT_ARB_SCR,
                   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
        mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
                 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

        mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

static u32
mt7603_wtbl1_addr(int idx)
{
        return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
        /* Mapped to WTBL2 */
        return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
        u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

        return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
        u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

        return base + idx * MT_WTBL4_SIZE;
}

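/*
 * The hardware wireless table (WTBL) is split into four sections. WTBL1
 * lives at a fixed register offset; WTBL2/3/4 are reached through a PCIe
 * remap window and laid out back to back: all MT7603_WTBL_SIZE WTBL2
 * entries first, then the WTBL3 entries, then WTBL4 (see the *_addr()
 * helpers above).
 */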
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
                      const u8 *mac_addr)
{
        const void *_mac = mac_addr;
        u32 addr = mt7603_wtbl1_addr(idx);
        u32 w0 = 0, w1 = 0;
        int i;

        if (_mac) {
                w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
                                get_unaligned_le16(_mac + 4));
                w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
                                get_unaligned_le32(_mac));
        }

        if (vif < 0)
                vif = 0;
        else
                w0 |= MT_WTBL1_W0_RX_CHECK_A1;
        w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

        mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

        mt76_set(dev, addr + 0 * 4, w0);
        mt76_set(dev, addr + 1 * 4, w1);
        mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

        mt76_stop_tx_ac(dev, GENMASK(3, 0));
        addr = mt7603_wtbl2_addr(idx);
        for (i = 0; i < MT_WTBL2_SIZE; i += 4)
                mt76_wr(dev, addr + i, 0);
        mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
        mt76_start_tx_ac(dev, GENMASK(3, 0));

        addr = mt7603_wtbl3_addr(idx);
        for (i = 0; i < MT_WTBL3_SIZE; i += 4)
                mt76_wr(dev, addr + i, 0);

        addr = mt7603_wtbl4_addr(idx);
        for (i = 0; i < MT_WTBL4_SIZE; i += 4)
                mt76_wr(dev, addr + i, 0);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
        u32 addr = mt7603_wtbl1_addr(idx);
        u32 val = mt76_rr(dev, addr + 3 * 4);

        val &= ~MT_WTBL1_W3_SKIP_TX;
        val |= enabled * MT_WTBL1_W3_SKIP_TX;

        mt76_wr(dev, addr + 3 * 4, val);
}

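/*
 * Flush pending tx frames for a station. While SKIP_TX is set on the
 * WTBL entry, the DMA free-queue command (FQCR) walks each of the four
 * AC queues and redirects the station's queued frames: with abort ==
 * true they go to the PSE free queue (port 3, queue 8) and are dropped,
 * otherwise they are handed back through HIF port 0, queue 1.
 */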
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
        int i, port, queue;

        if (abort) {
                port = 3; /* PSE */
                queue = 8; /* free queue */
        } else {
                port = 0; /* HIF */
                queue = 1; /* MCU queue */
        }

        mt7603_wtbl_set_skip_tx(dev, idx, true);

        mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
                FIELD_PREP(MT_TX_ABORT_WCID, idx));

        for (i = 0; i < 4; i++) {
                mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
                        FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
                        FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
                        FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
                        FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

                WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
                                        0, 5000));
        }

        mt76_wr(dev, MT_TX_ABORT, 0);

        mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
                          bool enabled)
{
        u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

        if (sta->smps == enabled)
                return;

        mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
        sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
                        bool enabled)
{
        int idx = sta->wcid.idx;
        u32 addr;

        spin_lock_bh(&dev->ps_lock);

        if (sta->ps == enabled)
                goto out;

        mt76_wr(dev, MT_PSE_RTA,
                FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
                FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
                FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
                FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
                MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

        mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

        if (enabled)
                mt7603_filter_tx(dev, idx, false);

        addr = mt7603_wtbl1_addr(idx);
        mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
        mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
                 enabled * MT_WTBL1_W3_POWER_SAVE);
        mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
        sta->ps = enabled;

out:
        spin_unlock_bh(&dev->ps_lock);
}

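/*
 * Reset a WTBL entry to its default state. The WTBL2/3/4 sections are
 * addressed by PSE frame (128-byte page) and entry-within-frame, so both
 * coordinates are derived from the entry size here. The WTBL3 entry id
 * is presumably stored in units of half an entry, hence the factor of 2.
 */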
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
        int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
        int wtbl2_frame = idx / wtbl2_frame_size;
        int wtbl2_entry = idx % wtbl2_frame_size;

        int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
        int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
        int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
        int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

        int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
        int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
        int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
        int wtbl4_entry = idx % wtbl4_frame_size;

        u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
        int i;

        mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

        mt76_wr(dev, addr + 0 * 4,
                MT_WTBL1_W0_RX_CHECK_A1 |
                MT_WTBL1_W0_RX_CHECK_A2 |
                MT_WTBL1_W0_RX_VALID);
        mt76_wr(dev, addr + 1 * 4, 0);
        mt76_wr(dev, addr + 2 * 4, 0);

        mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

        mt76_wr(dev, addr + 3 * 4,
                FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
                FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
                FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
                MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
        mt76_wr(dev, addr + 4 * 4,
                FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
                FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
                FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

        mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

        addr = mt7603_wtbl2_addr(idx);

        /* Clear BA information */
        mt76_wr(dev, addr + (15 * 4), 0);

        mt76_stop_tx_ac(dev, GENMASK(3, 0));
        for (i = 2; i <= 4; i++)
                mt76_wr(dev, addr + (i * 4), 0);
        mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
        mt76_start_tx_ac(dev, GENMASK(3, 0));

        mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
        mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
        mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
        struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
        int idx = msta->wcid.idx;
        u32 addr;
        u32 val;

        addr = mt7603_wtbl1_addr(idx);

        val = mt76_rr(dev, addr + 2 * 4);
        val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
        val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
               FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
               MT_WTBL1_W2_TXS_BAF_REPORT;

        if (sta->ht_cap.cap)
                val |= MT_WTBL1_W2_HT;
        if (sta->vht_cap.cap)
                val |= MT_WTBL1_W2_VHT;

        mt76_wr(dev, addr + 2 * 4, val);

        addr = mt7603_wtbl2_addr(idx);
        val = mt76_rr(dev, addr + 9 * 4);
        val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
                 MT_WTBL2_W9_SHORT_GI_80);
        if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
                val |= MT_WTBL2_W9_SHORT_GI_20;
        if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
                val |= MT_WTBL2_W9_SHORT_GI_40;
        mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
        mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
        mt76_wr(dev, MT_BA_CONTROL_1,
                (get_unaligned_le16(addr + 4) |
                 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
                 MT_BA_CONTROL_1_RESET));
}

void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
                            int ba_size)
{
        u32 addr = mt7603_wtbl2_addr(wcid);
        u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
                       (MT_WTBL2_W15_BA_WIN_SIZE <<
                        (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
        u32 tid_val;
        int i;

        if (ba_size < 0) {
                /* disable */
                mt76_clear(dev, addr + (15 * 4), tid_mask);
                return;
        }
        mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

        mt7603_mac_stop(dev);
        switch (tid) {
        case 0:
                mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
                break;
        case 1:
                mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
                break;
        case 2:
                mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
                               ssn);
                mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
                               ssn >> 8);
                break;
        case 3:
                mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
                break;
        case 4:
                mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
                break;
        case 5:
                mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
                               ssn);
                mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
                               ssn >> 4);
                break;
        case 6:
                mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
                break;
        case 7:
                mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
                break;
        }
        mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
        mt7603_mac_start(dev);

        for (i = 7; i > 0; i--) {
                if (ba_size >= MT_AGG_SIZE_LIMIT(i))
                        break;
        }

        tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
                  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

        mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

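/*
 * Map a hardware rate index back to an mt76 bitrate table index. The
 * hw_value of each bitrate encodes the PHY mode in the upper byte and
 * the hardware rate index in the lower byte (see mt7603_mac_tx_rate_val
 * below), so only the low byte is compared. For CCK, bit 2 of the index
 * flags a short-preamble variant; on 2.4 GHz the OFDM search presumably
 * skips the first four entries because those are the CCK rates.
 */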
static int
mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
                int idx, bool cck)
{
        int offset = 0;
        int len = sband->n_bitrates;
        int i;

        if (cck) {
                if (sband == &dev->mt76.sband_5g.sband)
                        return 0;

                idx &= ~BIT(2); /* short preamble */
        } else if (sband == &dev->mt76.sband_2g.sband) {
                offset = 4;
        }

        for (i = offset; i < len; i++) {
                if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
                        return i;
        }

        return 0;
}

static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
        struct mt7603_sta *sta;
        struct mt76_wcid *wcid;

        if (idx >= ARRAY_SIZE(dev->mt76.wcid))
                return NULL;

        wcid = rcu_dereference(dev->mt76.wcid[idx]);
        if (unicast || !wcid)
                return wcid;

        if (!wcid->sta)
                return NULL;

        sta = container_of(wcid, struct mt7603_sta, wcid);
        if (!sta->vif)
                return NULL;

        return &sta->vif->sta.wcid;
}

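/*
 * The hardware strips the 8-byte CCMP header from decrypted frames, but
 * mac80211 needs the PN for defragmentation. Rebuild the header in
 * front of the payload from the PN saved in the rx status: bytes are
 * PN0, PN1, reserved, ExtIV | key id, PN2..PN5, then clear
 * RX_FLAG_IV_STRIPPED so mac80211 parses it again.
 */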
static void
mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
        u8 *pn = status->iv;
        u8 *hdr;

        __skb_push(skb, 8);
        memmove(skb->data, skb->data + 8, hdr_len);
        hdr = skb->data + hdr_len;

        hdr[0] = pn[5];
        hdr[1] = pn[4];
        hdr[2] = 0;
        hdr[3] = 0x20 | (key_id << 6);
        hdr[4] = pn[3];
        hdr[5] = pn[2];
        hdr[6] = pn[1];
        hdr[7] = pn[0];

        status->flag &= ~RX_FLAG_IV_STRIPPED;
}

int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
        __le32 *rxd = (__le32 *)skb->data;
        u32 rxd0 = le32_to_cpu(rxd[0]);
        u32 rxd1 = le32_to_cpu(rxd[1]);
        u32 rxd2 = le32_to_cpu(rxd[2]);
        bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
        bool insert_ccmp_hdr = false;
        bool remove_pad;
        int idx;
        int i;

        memset(status, 0, sizeof(*status));

        i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
        sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
        i >>= 1;

        idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
        status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

        status->band = sband->band;
        if (i < sband->n_channels)
                status->freq = sband->channels[i].center_freq;

        if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
                status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
                status->flag |= RX_FLAG_MMIC_ERROR;

        if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
            !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
                status->flag |= RX_FLAG_DECRYPTED;
                status->flag |= RX_FLAG_IV_STRIPPED;
                status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
        }

        remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

        if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
                return -EINVAL;

        if (!sband->channels)
                return -EINVAL;

        rxd += 4;
        if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
        }
        if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
                u8 *data = (u8 *)rxd;

                if (status->flag & RX_FLAG_DECRYPTED) {
                        status->iv[0] = data[5];
                        status->iv[1] = data[4];
                        status->iv[2] = data[3];
                        status->iv[3] = data[2];
                        status->iv[4] = data[1];
                        status->iv[5] = data[0];

                        insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
                }

                rxd += 4;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
        }
        if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
                rxd += 2;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
        }
        if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
                u32 rxdg0 = le32_to_cpu(rxd[0]);
                u32 rxdg3 = le32_to_cpu(rxd[3]);
                bool cck = false;

                i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
                switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
                case MT_PHY_TYPE_CCK:
                        cck = true;
                        /* fall through */
                case MT_PHY_TYPE_OFDM:
                        i = mt7603_get_rate(dev, sband, i, cck);
                        break;
                case MT_PHY_TYPE_HT_GF:
                case MT_PHY_TYPE_HT:
                        status->encoding = RX_ENC_HT;
                        if (i > 15)
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (rxdg0 & MT_RXV1_HT_SHORT_GI)
                        status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
                if (rxdg0 & MT_RXV1_HT_AD_CODE)
                        status->enc_flags |= RX_ENC_FLAG_LDPC;

                status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
                                     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

                status->rate_idx = i;

                status->chains = dev->mt76.antenna_mask;
                status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
                                          dev->rssi_offset[0];
                status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
                                          dev->rssi_offset[1];

                status->signal = status->chain_signal[0];
                if (status->chains & BIT(1))
                        status->signal = max(status->signal,
                                             status->chain_signal[1]);

                if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
                        status->bw = RATE_INFO_BW_40;

                rxd += 6;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

        if (insert_ccmp_hdr) {
                u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

                mt7603_insert_ccmp_hdr(skb, key_id);
        }

        hdr = (struct ieee80211_hdr *)skb->data;
        if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
                return 0;

        status->aggr = unicast &&
                       !ieee80211_is_qos_nullfunc(hdr->frame_control);
        status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
        /* seq_ctrl is little endian; convert before extracting the SN */
        status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

        return 0;
}

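/*
 * Build the 16-bit hardware rate value used in the WTBL rate table and
 * TXWI: rate index plus PHY mode (CCK/OFDM/HT). For legacy rates the
 * mode and index come straight out of hw_value (mode in the upper byte).
 * STBC is only set for single-stream rates, since it transmits one
 * spatial stream over two antennas.
 */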
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
                       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
        u8 phy, nss, rate_idx;
        u16 rateval;

        *bw = 0;
        if (rate->flags & IEEE80211_TX_RC_MCS) {
                rate_idx = rate->idx;
                nss = 1 + (rate->idx >> 3);
                phy = MT_PHY_TYPE_HT;
                if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
                        phy = MT_PHY_TYPE_HT_GF;
                if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        *bw = 1;
        } else {
                const struct ieee80211_rate *r;
                int band = dev->mt76.chandef.chan->band;
                u16 val;

                nss = 1;
                r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
                if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                        val = r->hw_value_short;
                else
                        val = r->hw_value;

                phy = val >> 8;
                rate_idx = val & 0xff;
        }

        rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
                   FIELD_PREP(MT_TX_RATE_MODE, phy));

        if (stbc && nss == 1)
                rateval |= MT_TX_RATE_STBC;

        return rateval;
}

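/*
 * Program the per-station hardware retry rate table. The WTBL has eight
 * rate slots (RATE0..RATE7, spread across the RIUCR1-3 registers); the
 * driver fills them as probe, rates[0], rates[0], rates[1], rates[1],
 * rates[2], rates[2], rates[3], so each configured rate is retried twice
 * before falling back. CHANGE_BW_RATE marks the first slot at which the
 * bandwidth drops back.
 */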
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
                           struct ieee80211_tx_rate *probe_rate,
                           struct ieee80211_tx_rate *rates)
{
        int wcid = sta->wcid.idx;
        u32 addr = mt7603_wtbl2_addr(wcid);
        bool stbc = false;
        int n_rates = sta->n_rates;
        u8 bw, bw_prev, bw_idx = 0;
        u16 val[4];
        u16 probe_val;
        u32 w9 = mt76_rr(dev, addr + 9 * 4);
        int i;

        if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
                return;

        for (i = n_rates; i < 4; i++)
                rates[i] = rates[n_rates - 1];

        w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
              MT_WTBL2_W9_SHORT_GI_80;

        val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
        bw_prev = bw;

        if (probe_rate) {
                probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
                if (bw)
                        bw_idx = 1;
                else
                        bw_prev = 0;
        } else {
                probe_val = val[0];
        }

        w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
        w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

        val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
        if (bw_prev) {
                bw_idx = 3;
                bw_prev = bw;
        }

        val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
        if (bw_prev) {
                bw_idx = 5;
                bw_prev = bw;
        }

        val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
        if (bw_prev)
                bw_idx = 7;

        w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
                         bw_idx ? bw_idx - 1 : 7);

        mt76_wr(dev, MT_WTBL_RIUCR0, w9);

        mt76_wr(dev, MT_WTBL_RIUCR1,
                FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
                FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
                FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));

        mt76_wr(dev, MT_WTBL_RIUCR2,
                FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
                FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
                FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
                FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

        mt76_wr(dev, MT_WTBL_RIUCR3,
                FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
                FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
                FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

        mt76_wr(dev, MT_WTBL_UPDATE,
                FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
                MT_WTBL_UPDATE_RATE_UPDATE |
                MT_WTBL_UPDATE_TX_COUNT_CLEAR);

        if (!sta->wcid.tx_rate_set)
                mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

        sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
        sta->wcid.tx_rate_set = true;
}

static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
        memset(key_data, 0, 32);
        if (!key)
                return MT_CIPHER_NONE;

        if (key->keylen > 32)
                return MT_CIPHER_NONE;

        memcpy(key_data, key->key, key->keylen);

        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
                return MT_CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:
                return MT_CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:
                /* Rx/Tx MIC keys are swapped */
                memcpy(key_data + 16, key->key + 24, 8);
                memcpy(key_data + 24, key->key + 16, 8);
                return MT_CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_CCMP:
                return MT_CIPHER_AES_CCMP;
        default:
                return MT_CIPHER_NONE;
        }
}

int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
                        struct ieee80211_key_conf *key)
{
        enum mt7603_cipher_type cipher;
        u32 addr = mt7603_wtbl3_addr(wcid);
        u8 key_data[32];
        int key_len = sizeof(key_data);

        cipher = mt7603_mac_get_key_info(key, key_data);
        if (cipher == MT_CIPHER_NONE && key)
                return -EOPNOTSUPP;

        if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
                addr += key->keyidx * 16;
                key_len = 16;
        }

        mt76_wr_copy(dev, addr, key_data, key_len);

        addr = mt7603_wtbl1_addr(wcid);
        mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
        if (key)
                mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
        mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

        return 0;
}

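/*
 * Fill the 8-word TX descriptor (TXWI) prepended to each frame. Rough
 * word layout as used below: word 0 carries length and hardware queue,
 * word 1 addressing/format info, word 2 frame type flags, word 3 the
 * sequence number and remaining tx count, words 4/5 the PN and packet
 * id for tx status reporting, word 6 an optional fixed rate.
 */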
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
                      struct sk_buff *skb, struct mt76_queue *q,
                      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                      int pid, struct ieee80211_key_conf *key)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt7603_vif *mvif;
        int wlan_idx;
        int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
        int tx_count = 8;
        u8 frame_type, frame_subtype;
        u16 fc = le16_to_cpu(hdr->frame_control);
        u8 vif_idx = 0;
        u32 val;
        u8 bw;

        if (vif) {
                mvif = (struct mt7603_vif *)vif->drv_priv;
                vif_idx = mvif->idx;
                if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
                        vif_idx += 0x10;
        }

        if (sta) {
                struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

                tx_count = msta->rate_count;
        }

        if (wcid)
                wlan_idx = wcid->idx;
        else
                wlan_idx = MT7603_WTBL_RESERVED;

        frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
        frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

        val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
              FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
        txwi[0] = cpu_to_le32(val);

        val = MT_TXD1_LONG_FORMAT |
              FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
              FIELD_PREP(MT_TXD1_TID,
                         skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
              FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
              FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
              FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
              FIELD_PREP(MT_TXD1_PROTECTED, !!key);
        txwi[1] = cpu_to_le32(val);

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

        val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
              FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
              FIELD_PREP(MT_TXD2_MULTICAST,
                         is_multicast_ether_addr(hdr->addr1));
        txwi[2] = cpu_to_le32(val);

        if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
                txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

        txwi[4] = 0;

        val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
              FIELD_PREP(MT_TXD5_PID, pid);
        txwi[5] = cpu_to_le32(val);

        txwi[6] = 0;

        if (rate->idx >= 0 && rate->count &&
            !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
                u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

                txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

                val = MT_TXD6_FIXED_BW |
                      FIELD_PREP(MT_TXD6_BW, bw) |
                      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
                txwi[6] |= cpu_to_le32(val);

                if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
                        txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

                if (!(rate->flags & IEEE80211_TX_RC_MCS))
                        txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

                tx_count = rate->count;
        }

        /* use maximum tx count for beacons and buffered multicast */
        if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
                tx_count = 0x1f;

        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
              FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
        txwi[3] = cpu_to_le32(val);

        if (key) {
                u64 pn = atomic64_inc_return(&key->tx_pn);

                txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
                txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
                txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
        }

        txwi[7] = 0;

        return 0;
}

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                          struct sk_buff *skb, struct mt76_queue *q,
                          struct mt76_wcid *wcid, struct ieee80211_sta *sta,
                          u32 *tx_info)
{
        struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
        struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *key = info->control.hw_key;
        int pid;

        if (!wcid)
                wcid = &dev->global_sta.wcid;

        if (sta) {
                msta = (struct mt7603_sta *)sta->drv_priv;

                if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
                                    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
                    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
                        mt7603_wtbl_set_ps(dev, msta, false);
        }

        pid = mt76_tx_status_skb_add(mdev, wcid, skb);

        if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
                spin_lock_bh(&dev->mt76.lock);
                msta->rate_probe = true;
                mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
                                      msta->rates);
                spin_unlock_bh(&dev->mt76.lock);
        }

        mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);

        return 0;
}

static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
                struct ieee80211_tx_info *info, __le32 *txs_data)
{
        struct ieee80211_supported_band *sband;
        int final_idx = 0;
        u32 final_rate;
        u32 final_rate_flags;
        bool final_mpdu;
        bool ack_timeout;
        bool fixed_rate;
        bool probe;
        bool ampdu;
        bool cck = false;
        int count;
        u32 txs;
        u8 pid;
        int idx;
        int i;

        fixed_rate = info->status.rates[0].count;
        probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

        txs = le32_to_cpu(txs_data[4]);
        final_mpdu = txs & MT_TXS4_ACKED_MPDU;
        ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
        pid = FIELD_GET(MT_TXS4_PID, txs);
        count = FIELD_GET(MT_TXS4_TX_COUNT, txs);

        txs = le32_to_cpu(txs_data[0]);
        final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
        ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

        if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
                return false;

        if (txs & MT_TXS0_QUEUE_TIMEOUT)
                return false;

        if (!ack_timeout)
                info->flags |= IEEE80211_TX_STAT_ACK;

        info->status.ampdu_len = 1;
        info->status.ampdu_ack_len = !!(info->flags &
                                        IEEE80211_TX_STAT_ACK);

        if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
                info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

        if (fixed_rate && !probe) {
                info->status.rates[0].count = count;
                goto out;
        }

        for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
                int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);

                if (!i && probe) {
                        cur_count = 1;
                } else {
                        info->status.rates[i] = sta->rates[idx];
                        idx++;
                }

                if (i && info->status.rates[i].idx < 0) {
                        info->status.rates[i - 1].count += count;
                        break;
                }

                if (!count) {
                        info->status.rates[i].idx = -1;
                        break;
                }

                info->status.rates[i].count = cur_count;
                final_idx = i;
                count -= cur_count;
        }

out:
        final_rate_flags = info->status.rates[final_idx].flags;

        switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
        case MT_PHY_TYPE_CCK:
                cck = true;
                /* fall through */
        case MT_PHY_TYPE_OFDM:
                if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
                        sband = &dev->mt76.sband_5g.sband;
                else
                        sband = &dev->mt76.sband_2g.sband;
                final_rate &= GENMASK(5, 0);
                final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
                final_rate_flags = 0;
                break;
        case MT_PHY_TYPE_HT_GF:
        case MT_PHY_TYPE_HT:
                final_rate_flags |= IEEE80211_TX_RC_MCS;
                final_rate &= GENMASK(5, 0);
                if (final_rate > 15)
                        return false;
                break;
        default:
                return false;
        }

        info->status.rates[final_idx].idx = final_rate;
        info->status.rates[final_idx].flags = final_rate_flags;

        return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
                       __le32 *txs_data)
{
        struct mt76_dev *mdev = &dev->mt76;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (pid < MT_PACKET_ID_FIRST)
                return false;

        mt76_tx_status_lock(mdev, &list);
        skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
        if (skb) {
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

                if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
                        spin_lock_bh(&dev->mt76.lock);
                        if (sta->rate_probe) {
                                mt7603_wtbl_set_rates(dev, sta, NULL,
                                                      sta->rates);
                                sta->rate_probe = false;
                        }
                        spin_unlock_bh(&dev->mt76.lock);
                }

                if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
                        ieee80211_tx_info_clear_status(info);
                        info->status.rates[0].idx = -1;
                }

                mt76_tx_status_skb_done(mdev, skb, &list);
        }
        mt76_tx_status_unlock(mdev, &list);

        return !!skb;
}

void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
        struct ieee80211_tx_info info = {};
        struct ieee80211_sta *sta = NULL;
        struct mt7603_sta *msta = NULL;
        struct mt76_wcid *wcid;
        __le32 *txs_data = data;
        u32 txs;
        u8 wcidx;
        u8 pid;

        txs = le32_to_cpu(txs_data[4]);
        pid = FIELD_GET(MT_TXS4_PID, txs);
        txs = le32_to_cpu(txs_data[3]);
        wcidx = FIELD_GET(MT_TXS3_WCID, txs);

        if (pid == MT_PACKET_ID_NO_ACK)
                return;

        if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
                return;

        rcu_read_lock();

        wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
        if (!wcid)
                goto out;

        msta = container_of(wcid, struct mt7603_sta, wcid);
        sta = wcid_to_sta(wcid);

        if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
                goto out;

        if (wcidx >= MT7603_WTBL_STA || !sta)
                goto out;

        if (mt7603_fill_txs(dev, msta, &info, txs_data))
                ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
        rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
                            struct mt76_queue_entry *e, bool flush)
{
        struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
        struct sk_buff *skb = e->skb;

        if (!e->txwi) {
                dev_kfree_skb_any(skb);
                return;
        }

        /* completions on the four AC queues show that tx DMA is alive */
        if (q - dev->mt76.q_tx < 4)
                dev->tx_hang_check = 0;

        mt76_tx_complete_skb(mdev, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
        return mt76_poll(dev, MT_WPDMA_GLO_CFG,
                         MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
                         MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
                         0, 1000);
}

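/*
 * Reset the packet search engine (PSE). The RESET_PSE bit requests the
 * reset and the matching _S status bit signals completion; if it does
 * not assert within 500ms the failure is counted, so that the watchdog
 * can skip the DMA reset path until the PSE recovers.
 */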
static void mt7603_pse_reset(struct mt7603_dev *dev)
{
        /* Clear previous reset result */
        if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
                mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

        /* Reset PSE */
        mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

        if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
                            MT_MCU_DEBUG_RESET_PSE_S,
                            MT_MCU_DEBUG_RESET_PSE_S, 500)) {
                dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
                mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
        } else {
                dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
                mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
        }

        if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
                dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
        mt7603_mac_start(dev);

        wait_for_wpdma(dev);
        usleep_range(50, 100);

        mt76_set(dev, MT_WPDMA_GLO_CFG,
                 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
                  MT_WPDMA_GLO_CFG_RX_DMA_EN |
                  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
                  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

        mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
        mt76_clear(dev, MT_ARB_SCR,
                   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
        mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
        mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
        mt76_set(dev, MT_ARB_SCR,
                 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
        mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
        mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
        u32 addr;

        addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
                              MT_CLIENT_RESET_TX);

        /* Clear previous reset state */
        mt76_clear(dev, addr,
                   MT_CLIENT_RESET_TX_R_E_1 |
                   MT_CLIENT_RESET_TX_R_E_2 |
                   MT_CLIENT_RESET_TX_R_E_1_S |
                   MT_CLIENT_RESET_TX_R_E_2_S);

        /* Start PSE client TX abort */
        mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
        mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
                       MT_CLIENT_RESET_TX_R_E_1_S, 500);

        mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
        mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

        /* Wait for PSE client to clear TX FIFO */
        mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
                       MT_CLIENT_RESET_TX_R_E_2_S, 500);

        /* Clear PSE client TX abort state */
        mt76_clear(dev, addr,
                   MT_CLIENT_RESET_TX_R_E_1 |
                   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
        if (!is_mt7628(dev))
                return;

        mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
        mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
        int beacon_int = dev->beacon_int;
        u32 mask = dev->mt76.mmio.irqmask;
        int i;

        ieee80211_stop_queues(dev->mt76.hw);
        set_bit(MT76_RESET, &dev->mt76.state);

        /* lock/unlock all queues to ensure that no tx is pending */
        mt76_txq_schedule_all(&dev->mt76);

        tasklet_disable(&dev->tx_tasklet);
        tasklet_disable(&dev->pre_tbtt_tasklet);
        napi_disable(&dev->mt76.napi[0]);
        napi_disable(&dev->mt76.napi[1]);

        mutex_lock(&dev->mt76.mutex);

        mt7603_beacon_set_timer(dev, -1, 0);

        if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
            dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
            dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
            dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
                mt7603_pse_reset(dev);

        if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
                goto skip_dma_reset;

        mt7603_mac_stop(dev);

        mt76_clear(dev, MT_WPDMA_GLO_CFG,
                   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
                   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
        usleep_range(1000, 2000);

        mt7603_irq_disable(dev, mask);

        mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

        mt7603_pse_client_reset(dev);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
                mt76_queue_tx_cleanup(dev, i, true);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);

        mt7603_dma_sched_reset(dev);

        mt7603_mac_dma_start(dev);

        mt7603_irq_enable(dev, mask);

skip_dma_reset:
        clear_bit(MT76_RESET, &dev->mt76.state);
        mutex_unlock(&dev->mt76.mutex);

        tasklet_enable(&dev->tx_tasklet);
        tasklet_schedule(&dev->tx_tasklet);

        tasklet_enable(&dev->pre_tbtt_tasklet);
        mt7603_beacon_set_timer(dev, -1, beacon_int);

        napi_enable(&dev->mt76.napi[0]);
        napi_schedule(&dev->mt76.napi[0]);

        napi_enable(&dev->mt76.napi[1]);
        napi_schedule(&dev->mt76.napi[1]);

        ieee80211_wake_queues(dev->mt76.hw);
        mt76_txq_schedule_all(&dev->mt76);
}

static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
        u32 val;

        mt76_wr(dev, MT_WPDMA_DEBUG,
                FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
                MT_WPDMA_DEBUG_SEL);

        val = mt76_rr(dev, MT_WPDMA_DEBUG);
        return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
        if (is_mt7628(dev))
                return mt7603_dma_debug(dev, 9) & BIT(9);

        return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
        if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
                return false;

        return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
        u32 val;

        if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
                return false;

        val = mt7603_dma_debug(dev, 9);
        return (val & BIT(8)) && (val & 0xf) != 0xf;
}

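/*
 * Detect a stalled tx queue: a queue counts as hung when it still has
 * frames queued but the hardware DMA index has not moved since the last
 * check while lagging behind the CPU index. The saved index is
 * refreshed on every poll; the watchdog below still requires several
 * consecutive hits before a reset is triggered.
 */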
static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
        struct mt76_queue *q;
        u32 dma_idx, prev_dma_idx;
        int i;

        for (i = 0; i < 4; i++) {
                q = &dev->mt76.q_tx[i];

                if (!q->queued)
                        continue;

                prev_dma_idx = dev->tx_dma_idx[i];
                dma_idx = ioread32(&q->regs->dma_idx);
                dev->tx_dma_idx[i] = dma_idx;

                if (dma_idx == prev_dma_idx &&
                    dma_idx != ioread32(&q->regs->cpu_idx))
                        break;
        }

        return i < 4;
}

static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
        u32 addr, val;

        if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
                return true;

        if (mt7603_rx_fifo_busy(dev))
                return false;

        addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
        mt76_wr(dev, addr, 3);
        val = mt76_rr(dev, addr) >> 16;

        if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
                return true;

        return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

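/*
 * Generic watchdog helper: the per-cause counter only advances while
 * check() keeps reporting a stuck condition and is cleared as soon as
 * it recovers, so a reset is triggered only after MT7603_WATCHDOG_TIMEOUT
 * consecutive busy polls. Setting dev->reset_test to (cause + 1)
 * presumably forces the corresponding reset for testing.
 */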
static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
                      enum mt7603_reset_cause cause,
                      bool (*check)(struct mt7603_dev *dev))
{
        if (dev->reset_test == cause + 1) {
                dev->reset_test = 0;
                goto trigger;
        }

        if (check) {
                if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
                        *counter = 0;
                        return false;
                }

                (*counter)++;
        }

        if (*counter < MT7603_WATCHDOG_TIMEOUT)
                return false;
trigger:
        dev->cur_reset_cause = cause;
        dev->reset_cause[cause]++;
        return true;
}

void mt7603_update_channel(struct mt76_dev *mdev)
{
        struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
        struct mt76_channel_state *state;
        ktime_t cur_time;
        u32 busy;

        if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
                return;

        state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
        busy = mt76_rr(dev, MT_MIB_STAT_PSCCA);

        spin_lock_bh(&dev->mt76.cc_lock);
        cur_time = ktime_get_boottime();
        state->cc_busy += busy;
        state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time));
        dev->survey_time = cur_time;
        spin_unlock_bh(&dev->mt76.cc_lock);
}

void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
        u32 rxtd_6 = 0xd7c80000;

        if (val == dev->ed_strict_mode)
                return;

        dev->ed_strict_mode = val;

        /* Ensure that ED/CCA does not trigger if disabled */
        if (!dev->ed_monitor)
                rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
        else
                rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

        if (dev->ed_monitor && !dev->ed_strict_mode)
                rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
        else
                rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

        mt76_wr(dev, MT_RXTD(6), rxtd_6);

        mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
                       dev->ed_monitor && !dev->ed_strict_mode);
}

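/*
 * Periodic ED/CCA (energy detect) monitoring for regulatory adaptivity:
 * if the energy-detect busy time exceeds 90% of the polling interval on
 * several consecutive runs, strict mode is enabled; sustained idle
 * intervals disable it again. Strong received signals (>= -40 dBm) are
 * counted separately, presumably so that strict mode is only imposed
 * when the medium looks busy without a correspondingly strong signal.
 */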
static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
        u32 val = mt76_rr(dev, MT_AGC(41));
        ktime_t cur_time;
        int rssi0, rssi1;
        u32 active;
        u32 ed_busy;

        if (!dev->ed_monitor)
                return;

        rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
        if (rssi0 > 128)
                rssi0 -= 256;

        rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
        if (rssi1 > 128)
                rssi1 -= 256;

        if (max(rssi0, rssi1) >= -40 &&
            dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
                dev->ed_strong_signal++;
        else if (dev->ed_strong_signal > 0)
                dev->ed_strong_signal--;

        cur_time = ktime_get_boottime();
        ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

        active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
        dev->ed_time = cur_time;

        if (!active)
                return;

        if (100 * ed_busy / active > 90) {
                if (dev->ed_trigger < 0)
                        dev->ed_trigger = 0;
                dev->ed_trigger++;
        } else {
                if (dev->ed_trigger > 0)
                        dev->ed_trigger = 0;
                dev->ed_trigger--;
        }

        if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
            dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
                mt7603_edcca_set_strict(dev, true);
        } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
                mt7603_edcca_set_strict(dev, false);
        }

        if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
                dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
        else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
                dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
        mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
        mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
        mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

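/*
 * Translate the target sensitivity (in dBm, as derived from the false
 * CCA statistics below) into AGC gain settings. The base register
 * values are opaque hardware constants; the computed adjustment is
 * replicated into several 4-bit gain fields, with three ranges covering
 * progressively less sensitive configurations.
 */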
static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
        u32 agc0 = dev->agc0, agc3 = dev->agc3;
        u32 adj;

        if (!dev->sensitivity || dev->sensitivity < -100) {
                dev->sensitivity = 0;
        } else if (dev->sensitivity <= -84) {
                adj = 7 + (dev->sensitivity + 92) / 2;

                agc0 = 0x56f0076f;
                agc0 |= adj << 12;
                agc0 |= adj << 16;
                agc3 = 0x81d0d5e3;
        } else if (dev->sensitivity <= -72) {
                adj = 7 + (dev->sensitivity + 80) / 2;

                agc0 = 0x6af0006f;
                agc0 |= adj << 8;
                agc0 |= adj << 12;
                agc0 |= adj << 16;

                agc3 = 0x8181d5e3;
        } else {
                if (dev->sensitivity > -54)
                        dev->sensitivity = -54;

                adj = 7 + (dev->sensitivity + 80) / 2;

                agc0 = 0x7ff0000f;
                agc0 |= adj << 4;
                agc0 |= adj << 8;
                agc0 |= adj << 12;
                agc0 |= adj << 16;

                agc3 = 0x818181e3;
        }

        mt76_wr(dev, MT_AGC(0), agc0);
        mt76_wr(dev, MT_AGC1(0), agc0);

        mt76_wr(dev, MT_AGC(3), agc3);
        mt76_wr(dev, MT_AGC1(3), agc3);
}

static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
        int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
        int false_cca;
        int min_signal;
        u32 val;

        val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
        pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
        pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

        val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
        mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
        mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

        /* false CCA: preamble detected but no valid frame followed */
        dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
        dev->false_cca_cck = pd_cck - mdrdy_cck;

        mt7603_cca_stats_reset(dev);

        min_signal = mt76_get_min_avg_rssi(&dev->mt76);
        if (!min_signal) {
                dev->sensitivity = 0;
                dev->last_cca_adj = jiffies;
                goto out;
        }

        min_signal -= 15;

        false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
        if (false_cca > 600) {
                if (!dev->sensitivity)
                        dev->sensitivity = -92;
                else
                        dev->sensitivity += 2;
                dev->last_cca_adj = jiffies;
        } else if (false_cca < 100 ||
                   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
                dev->last_cca_adj = jiffies;
                if (!dev->sensitivity)
                        goto out;

                dev->sensitivity -= 2;
        }

        if (dev->sensitivity && dev->sensitivity > min_signal) {
                dev->sensitivity = min_signal;
                dev->last_cca_adj = jiffies;
        }

out:
        mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
        struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
                                              mac_work.work);
        bool reset = false;

        mt76_tx_status_check(&dev->mt76, NULL, false);

        mutex_lock(&dev->mt76.mutex);

        dev->mac_work_count++;
        mt7603_update_channel(&dev->mt76);
        mt7603_edcca_check(dev);

        if (dev->mac_work_count == 10)
                mt7603_false_cca_check(dev);

        if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
                                  RESET_CAUSE_RX_PSE_BUSY,
                                  mt7603_rx_pse_busy) ||
            mt7603_watchdog_check(dev, &dev->beacon_check,
                                  RESET_CAUSE_BEACON_STUCK,
                                  NULL) ||
            mt7603_watchdog_check(dev, &dev->tx_hang_check,
                                  RESET_CAUSE_TX_HANG,
                                  mt7603_tx_hang) ||
            mt7603_watchdog_check(dev, &dev->tx_dma_check,
                                  RESET_CAUSE_TX_BUSY,
                                  mt7603_tx_dma_busy) ||
            mt7603_watchdog_check(dev, &dev->rx_dma_check,
                                  RESET_CAUSE_RX_BUSY,
                                  mt7603_rx_dma_busy) ||
            mt7603_watchdog_check(dev, &dev->mcu_hang,
                                  RESET_CAUSE_MCU_HANG,
                                  NULL) ||
            dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
                dev->beacon_check = 0;
                dev->tx_dma_check = 0;
                dev->tx_hang_check = 0;
                dev->rx_dma_check = 0;
                dev->rx_pse_check = 0;
                dev->mcu_hang = 0;
                dev->rx_dma_idx = ~0;
                memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
                reset = true;
                dev->mac_work_count = 0;
        }

        if (dev->mac_work_count >= 10)
                dev->mac_work_count = 0;

        mutex_unlock(&dev->mt76.mutex);

        if (reset)
                mt7603_mac_watchdog_reset(dev);

        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
                                     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}