]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/net/wireless/ath/ath9k/mac.c
ath9k: fix the .flush driver op implementation
[mirror_ubuntu-zesty-kernel.git] / drivers / net / wireless / ath / ath9k / mac.c
CommitLineData
f1dc5600 1/*
cee075a2 2 * Copyright (c) 2008-2009 Atheros Communications Inc.
f1dc5600
S
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
990b70ab 17#include "hw.h"
ac0bb767 18#include "hw-ops.h"
f1dc5600 19
cc610ac0
VT
/*
 * Program the per-queue TX interrupt enables from the mask fields cached
 * in @ah: TXOK/TXDESC go into AR_IMR_S0, TXERR/TXEOL into AR_IMR_S1 and
 * TXURN into the shadowed AR_IMR_S2 copy (ah->imrs2_reg).
 *
 * NOTE(review): @qi is currently unused here — callers pass the queue
 * info for symmetry only; confirm before relying on it.
 */
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	/* Batch the three IMR writes into one buffered burst. */
	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	/* Update only the TXURN bits of the cached IMR_S2 value. */
	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}
44
/* Read the current TX descriptor pointer (AR_QTXDP) of queue @q. */
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);
50
/* Point queue @q's TX DMA engine at the descriptor chain at @txdp. */
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);
56
/* Kick TX on queue @q by setting its bit in the AR_Q_TXE enable register. */
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
64
65void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
66{
67 struct ar5416_desc *ads = AR5416DESC(ds);
68
69 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
70 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
71 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
72 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
73 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
74}
75EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
76
77u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
78{
79 u32 npend;
80
81 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
82 if (npend == 0) {
83
84 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
85 npend = 1;
86 }
87
88 return npend;
89}
90EXPORT_SYMBOL(ath9k_hw_numtxpending);
91
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to ensure to set the frame trigger level based
 * on the DMA request size. For example if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there need to be enough space in the tx FIFO for the requested transfer
 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 *
 * Returns: true when the trigger level was actually changed.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	/* Already at the chip's ceiling - nothing to raise. */
	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	/* Quiesce interrupts around the AR_TXCFG read-modify-write. */
	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
145
0d51cccc
FF
/*
 * Forcibly abort TX DMA on all queues.
 *
 * Disables TX on every QCU, forces the PCU/baseband quiet so no new
 * transmission can start, then polls each queue (up to ~5ms per queue)
 * until it reports no pending frames, and finally restores the forced
 * bits and re-enables the queues' TXD gate.
 */
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	/* Disable TX DMA on all queues at once. */
	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	/* Force the channel idle / quiet so in-flight frames terminate. */
	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	/* Poll each queue until it drains (up to 1000 * 5us per queue). */
	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	/* Undo the forced-quiet state and re-enable the queues. */
	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
173
cc610ac0
VT
174bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
175{
176#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
177#define ATH9K_TIME_QUANTUM 100 /* usec */
178 struct ath_common *common = ath9k_hw_common(ah);
179 struct ath9k_hw_capabilities *pCap = &ah->caps;
180 struct ath9k_tx_queue_info *qi;
181 u32 tsfLow, j, wait;
182 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
183
184 if (q >= pCap->total_queues) {
226afe68
JP
185 ath_dbg(common, ATH_DBG_QUEUE,
186 "Stopping TX DMA, invalid queue: %u\n", q);
cc610ac0
VT
187 return false;
188 }
189
190 qi = &ah->txq[q];
191 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
226afe68
JP
192 ath_dbg(common, ATH_DBG_QUEUE,
193 "Stopping TX DMA, inactive queue: %u\n", q);
cc610ac0
VT
194 return false;
195 }
196
197 REG_WRITE(ah, AR_Q_TXD, 1 << q);
198
199 for (wait = wait_time; wait != 0; wait--) {
200 if (ath9k_hw_numtxpending(ah, q) == 0)
201 break;
202 udelay(ATH9K_TIME_QUANTUM);
203 }
204
205 if (ath9k_hw_numtxpending(ah, q)) {
226afe68
JP
206 ath_dbg(common, ATH_DBG_QUEUE,
207 "%s: Num of pending TX Frames %d on Q %d\n",
208 __func__, ath9k_hw_numtxpending(ah, q), q);
cc610ac0
VT
209
210 for (j = 0; j < 2; j++) {
211 tsfLow = REG_READ(ah, AR_TSF_L32);
212 REG_WRITE(ah, AR_QUIET2,
213 SM(10, AR_QUIET2_QUIET_DUR));
214 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
215 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
216 REG_SET_BIT(ah, AR_TIMER_MODE,
217 AR_QUIET_TIMER_EN);
218
219 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
220 break;
221
226afe68
JP
222 ath_dbg(common, ATH_DBG_QUEUE,
223 "TSF has moved while trying to set quiet time TSF: 0x%08x\n",
224 tsfLow);
cc610ac0
VT
225 }
226
227 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
228
229 udelay(200);
230 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
231
232 wait = wait_time;
233 while (ath9k_hw_numtxpending(ah, q)) {
234 if ((--wait) == 0) {
3800276a
JP
235 ath_err(common,
236 "Failed to stop TX DMA in 100 msec after killing last frame\n");
cc610ac0
VT
237 break;
238 }
239 udelay(ATH9K_TIME_QUANTUM);
240 }
241
242 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
243 }
244
245 REG_WRITE(ah, AR_Q_TXD, 0);
246 return wait != 0;
247
248#undef ATH9K_TX_STOP_DMA_TIMEOUT
249#undef ATH9K_TIME_QUANTUM
250}
251EXPORT_SYMBOL(ath9k_hw_stoptxdma);
252
cbe61d8a 253void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
f1dc5600 254{
2660b81a
S
255 *txqs &= ah->intr_txqs;
256 ah->intr_txqs &= ~(*txqs);
f1dc5600 257}
7322fd19 258EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
f1dc5600 259
/*
 * Copy caller-supplied queue parameters into the driver's shadow state
 * for TX queue @q, clamping and defaulting fields as needed.  Hardware
 * registers are NOT touched here; ath9k_hw_resettxqueue() pushes the
 * shadow state to the chip.
 *
 * Returns: false for an invalid or inactive queue, true otherwise.
 */
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	/* AIFS: clamp to 255, or take the default when unspecified. */
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	/* cwmin/cwmax: round up to the next (2^n - 1) value, max 1023. */
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	/* Retry limits: clamp to the 4-bit hardware field, 0 = default. */
	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
f1dc5600 331
cbe61d8a 332bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
f1dc5600
S
333 struct ath9k_tx_queue_info *qinfo)
334{
c46917bb 335 struct ath_common *common = ath9k_hw_common(ah);
2660b81a 336 struct ath9k_hw_capabilities *pCap = &ah->caps;
f1dc5600
S
337 struct ath9k_tx_queue_info *qi;
338
339 if (q >= pCap->total_queues) {
226afe68
JP
340 ath_dbg(common, ATH_DBG_QUEUE,
341 "Get TXQ properties, invalid queue: %u\n", q);
f1dc5600
S
342 return false;
343 }
344
2660b81a 345 qi = &ah->txq[q];
f1dc5600 346 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
226afe68
JP
347 ath_dbg(common, ATH_DBG_QUEUE,
348 "Get TXQ properties, inactive queue: %u\n", q);
f1dc5600
S
349 return false;
350 }
351
352 qinfo->tqi_qflags = qi->tqi_qflags;
353 qinfo->tqi_ver = qi->tqi_ver;
354 qinfo->tqi_subtype = qi->tqi_subtype;
355 qinfo->tqi_qflags = qi->tqi_qflags;
356 qinfo->tqi_priority = qi->tqi_priority;
357 qinfo->tqi_aifs = qi->tqi_aifs;
358 qinfo->tqi_cwmin = qi->tqi_cwmin;
359 qinfo->tqi_cwmax = qi->tqi_cwmax;
360 qinfo->tqi_shretry = qi->tqi_shretry;
361 qinfo->tqi_lgretry = qi->tqi_lgretry;
362 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
363 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
364 qinfo->tqi_burstTime = qi->tqi_burstTime;
365 qinfo->tqi_readyTime = qi->tqi_readyTime;
366
367 return true;
368}
7322fd19 369EXPORT_SYMBOL(ath9k_hw_get_txq_props);
f1dc5600 370
/*
 * Allocate and initialize a TX queue of the given @type.
 *
 * Fixed-function queues (beacon, CAB, UAPSD) live at fixed slots near the
 * top of the queue range; PS-poll uses queue 1; data queues take the first
 * inactive slot.  When @qinfo is NULL, sane defaults are installed.
 *
 * Returns: the queue number, or -1 on failure.
 */
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		/* First inactive slot wins. */
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		/* No caller parameters: enable the usual TX interrupts
		 * and install default contention/retry settings. */
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
f1dc5600 435
/*
 * Release TX queue @q: mark it inactive, drop its bit from every per-queue
 * interrupt mask and push the updated masks to the hardware.
 *
 * Returns: false for an invalid or already-inactive queue, true otherwise.
 */
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
f1dc5600 467
/*
 * Program the hardware registers of TX queue @q from its shadow state:
 * contention window / AIFS, retry limits, CBR and ready-time settings,
 * plus type-specific behavior (beacon, CAB, PS-poll, UAPSD), and finally
 * the per-queue interrupt masks.
 *
 * Returns: false for an invalid queue; true for inactive queues (nothing
 * to do) and on success.
 */
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	/* Default cwmin: channel-dependent, rounded up to (2^n - 1). */
	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	/* Constant-bit-rate scheduling, if requested. */
	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	/* Ready-time window (CAB computes its own value below). */
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);

	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	/* Queue-type-specific programming. */
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		/* Ready time in usec -> hardware units (x1024), shrunk by
		 * the software/DMA beacon response gap and backoff. */
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	/* Rebuild the per-queue interrupt masks from the queue flags. */
	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
f1dc5600 654
/*
 * Decode a completed (pre-AR9003) RX descriptor into @rs.
 *
 * Returns -EINPROGRESS while the hardware has not yet set AR_RxDone,
 * 0 once the status words have been decoded.  A local copy of the
 * descriptor's RX status area is taken first so the fields are read
 * consistently even if DMA is still touching the ring.
 */
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	/* Snapshot the RX status words before decoding. */
	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	/* RSSI is meaningless when the post-delimiter CRC failed. */
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;

		/* Key-cache miss is surfaced as a decrypt error. */
		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
f1dc5600 745
e7824a50
LR
/*
 * This can stop or re-enables RX.
 *
 * If bool is set this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 *
 * Returns: false if, when stopping, the RX state machine failed to go
 * idle within AH_WAIT_TIMEOUT (the abort bits are rolled back in that
 * case); true otherwise.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		/* Wait for the observed RX state machine to reach idle. */
		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			/* Roll back the abort so RX is not left wedged. */
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
f1dc5600 782
/* Point the RX DMA engine at the descriptor chain at @rxdp. */
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);
f1dc5600 788
/*
 * Start PCU-level receive: enable MIB counters, reset ANI state (with
 * the scanning flag passed through) and clear the RX disable/abort bits.
 */
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);
f1dc5600 798
/* Abort PCU receive (set RX abort/disable) and stop the MIB counters. */
void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
806
cbe61d8a 807bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
f1dc5600 808{
0caa7b14
S
809#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
810#define AH_RX_TIME_QUANTUM 100 /* usec */
c46917bb 811 struct ath_common *common = ath9k_hw_common(ah);
0caa7b14
S
812 int i;
813
f1dc5600
S
814 REG_WRITE(ah, AR_CR, AR_CR_RXD);
815
0caa7b14
S
816 /* Wait for rx enable bit to go low */
817 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
818 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
819 break;
820 udelay(AH_TIME_QUANTUM);
821 }
822
823 if (i == 0) {
3800276a
JP
824 ath_err(common,
825 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
826 AH_RX_STOP_DMA_TIMEOUT / 1000,
827 REG_READ(ah, AR_CR),
828 REG_READ(ah, AR_DIAG_SW));
f1dc5600
S
829 return false;
830 } else {
831 return true;
832 }
0caa7b14
S
833
834#undef AH_RX_TIME_QUANTUM
835#undef AH_RX_STOP_DMA_TIMEOUT
f1dc5600 836}
7322fd19 837EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
536b3a7a
LR
838
839int ath9k_hw_beaconq_setup(struct ath_hw *ah)
840{
841 struct ath9k_tx_queue_info qi;
842
843 memset(&qi, 0, sizeof(qi));
844 qi.tqi_aifs = 1;
845 qi.tqi_cwmin = 0;
846 qi.tqi_cwmax = 0;
847 /* NB: don't enable any interrupts */
848 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
849}
850EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
55e82df4
VT
851
852bool ath9k_hw_intrpend(struct ath_hw *ah)
853{
854 u32 host_isr;
855
856 if (AR_SREV_9100(ah))
857 return true;
858
859 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
860 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
861 return true;
862
863 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
864 if ((host_isr & AR_INTR_SYNC_DEFAULT)
865 && (host_isr != AR_INTR_SPURIOUS))
866 return true;
867
868 return false;
869}
870EXPORT_SYMBOL(ath9k_hw_intrpend);
871
4df3071e
FF
/*
 * Mask all chip interrupts: clear IER, then (on chips that have them)
 * the async and sync host enables.  Each write is followed by a read
 * of the same register to flush the write before returning.
 */
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
888
/*
 * Unmask chip interrupts (IER plus, on non-AR9100 chips, the async and
 * sync host enables/masks).  Does nothing unless the driver's cached
 * interrupt mask has ATH9K_INT_GLOBAL set.
 */
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);


		REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
			  AR_INTR_SYNC_DEFAULT);
		REG_WRITE(ah, AR_INTR_SYNC_MASK,
			  AR_INTR_SYNC_DEFAULT);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
913
/*
 * Translate the driver-level interrupt set @ints into the hardware IMR
 * and secondary IMR_S2/S5 masks and program them, then re-enable
 * interrupts via ath9k_hw_enable_interrupts() (which is a no-op unless
 * ah->imask has ATH9K_INT_GLOBAL set).
 */
void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	/* NOTE(review): enabling when GLOBAL is *clear* looks inverted —
	 * confirm against the upstream history before changing. */
	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_enable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		/* Mitigated TX interrupts replace per-event TXOK/TXDESC. */
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			/* AR9300+ has split high/low priority RX rings. */
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	/* Beacon-related events are funneled through BCNMISC + IMR_S2. */
	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	/* Rewrite only the bits of the cached IMR_S2 that we manage. */
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	ath9k_hw_enable_interrupts(ah);

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);