/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

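/*
 * Program AR_IMR_S0/S1/S2 so that the per-queue TX interrupts (OK, ERR,
 * DESC, EOL, URN) match the mask bits currently cached in the ath_hw state.
 */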
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}

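/*
 * Raise or lower the TX FIFO trigger level (AR_FTRIG) by one step, with
 * interrupts masked while the register is rewritten.  Returns true if the
 * level actually changed.
 */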
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}

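/*
 * Stop TX DMA on queue q.  The queue is disabled via AR_Q_TXD and then
 * polled with ath9k_hw_numtxpending().  If frames are still pending, a
 * short quiet period is scheduled and the channel is forced idle to kill
 * the last frame before polling again.  Returns true if the queue drained.
 */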
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_QUEUE,
					  "Failed to stop TX DMA in 100 "
					  "msec after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}

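/*
 * Fill in the per-segment fields of a TX descriptor.  AR_TxMore chains
 * every segment except the last; the final segment copies ds_ctl2/ds_ctl3
 * from the first descriptor (ds0).  All TX status words are cleared for
 * the hardware.
 */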
void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

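/*
 * Harvest the completion status of a TX descriptor.  Returns -EINPROGRESS
 * until the hardware sets AR_TxDone, then translates the status words into
 * ds->ds_txstat (error flags, BA bitmap, final rate index, RSSI, EVM and
 * retry counts).
 */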
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 0;

	return 0;
}

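/*
 * Program the first TX control words of a descriptor: frame length,
 * clamped TX power, key index/type, frame type and per-frame flag bits.
 * On AR9285 the additional ctl8-ctl11 words are zeroed.
 */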
void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}

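/*
 * Write the multi-rate retry scenario into the descriptor: RTS/CTS
 * protection, per-series try counts, rates, packet durations and rate
 * flags.  ctl2/ctl3 are mirrored into the last descriptor of the frame.
 */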
void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}

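/*
 * Copy caller-supplied queue parameters into the software shadow for
 * queue q, substituting driver defaults for ATH9K_TXQ_USEDEFAULT fields
 * and rounding CWmin/CWmax up to the next (2^n - 1).  Hardware registers
 * are not touched here; ath9k_hw_resettxqueue() pushes the values out.
 */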
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}

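/*
 * Allocate a hardware TX queue of the requested type.  Beacon, CAB and
 * UAPSD queues map to fixed slots at the top of the queue range, PS-poll
 * uses queue 1, and data queues take the first inactive slot.  Returns
 * the queue number, or -1 if no queue is available or the type is invalid.
 */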
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}

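/*
 * Mark TX queue q inactive and clear its bits from all per-queue TX
 * interrupt masks, then reprogram the interrupt mask registers.
 */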
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}

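/*
 * Push the software queue parameters for queue q into the hardware:
 * contention window/AIFS, retry limits, CBR and ready-time settings, plus
 * type-specific behaviour for beacon, CAB, PS-poll and UAPSD queues.
 * Finally updates the per-queue TX interrupt masks.
 */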
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}

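/*
 * Harvest the status of a received frame.  Returns -EINPROGRESS until the
 * hardware sets AR_RxDone; the status words are then copied into a local
 * snapshot and translated into ds->ds_rxstat (length, timestamp, RSSI,
 * rate, aggregation info and error flags).
 */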
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
		ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
}

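/*
 * Abort or re-enable PCU frame reception.  When aborting, the RX state
 * machine is polled until it goes idle; if it does not, the abort bits
 * are backed out and false is returned.
 */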
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
				  reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_rxena(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}

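/*
 * Stop RX DMA and poll AR_CR until the receive enable bit clears.
 * Returns false (with a diagnostic dump of AR_CR and AR_DIAG_SW) if the
 * engine does not stop within the timeout.
 */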
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "DMA failed to stop in %d ms "
			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			  AH_RX_STOP_DMA_TIMEOUT / 1000,
			  REG_READ(ah, AR_CR),
			  REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}