]>
Commit | Line | Data |
---|---|---|
c6e387a2 NK |
1 | /* |
2 | * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> | |
3 | * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> | |
4 | * | |
5 | * Permission to use, copy, modify, and distribute this software for any | |
6 | * purpose with or without fee is hereby granted, provided that the above | |
7 | * copyright notice and this permission notice appear in all copies. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
16 | * | |
17 | */ | |
18 | ||
19 | /*************************************\ | |
20 | * DMA and interrupt masking functions * | |
21 | \*************************************/ | |
22 | ||
c47faa36 NK |
23 | /** |
24 | * DOC: DMA and interrupt masking functions | |
c6e387a2 NK |
25 | * |
26 | * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and | |
27 | * handle queue setup for 5210 chipset (rest are handled on qcu.c). | |
6a2a0e73 | 28 | * Also we setup interrupt mask register (IMR) and read the various interrupt |
c6e387a2 | 29 | * status registers (ISR). |
c6e387a2 NK |
30 | */ |
31 | ||
516304b0 JP |
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | ||
c6e387a2 NK |
34 | #include "ath5k.h" |
35 | #include "reg.h" | |
36 | #include "debug.h" | |
c6e387a2 | 37 | |
9320b5c4 | 38 | |
c6e387a2 NK |
39 | /*********\ |
40 | * Receive * | |
41 | \*********/ | |
42 | ||
/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 *
 * Sets the RX enable bit on the control register, then reads the
 * register back to flush the posted write (same read-back-to-flush
 * pattern used elsewhere in this file, e.g. after the PISR write).
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);		/* flush the write */
}
53 | ||
/**
 * ath5k_hw_stop_rx_dma() - Stop DMA receive
 * @ah: The &struct ath5k_hw
 *
 * Sets the RX disable bit on the control register and polls until
 * the RX enable bit reads back clear.
 *
 * Returns 0 on success, -EBUSY if the receive unit did not stop
 * within the polling window (1000 iterations x 100us).
 */
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
	 */
	for (i = 1000; i > 0 &&
			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
			i--)
		udelay(100);

	/* i == 0 means we timed out with RXE still set */
	if (!i)
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"failed to stop RX DMA !\n");

	return i ? 0 : -EBUSY;
}
79 | ||
/**
 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
 * @ah: The &struct ath5k_hw
 *
 * Returns the current contents of the RX descriptor pointer register.
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
89 | ||
/**
 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Writes @phys_addr to the RX descriptor pointer register, but only
 * while RX DMA is stopped — changing RXDP under an active receive
 * engine is refused.
 *
 * Returns 0 on success, -EIO if rx is active.
 */
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	/* Refuse to move the descriptor pointer while RXE is set */
	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tried to set RXDP while rx was active !\n");
		return -EIO;
	}

	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
	return 0;
}
109 | ||
110 | ||
111 | /**********\ | |
112 | * Transmit * | |
113 | \**********/ | |
114 | ||
/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and since 5210 doesn't have
 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
 * of range or if queue is already disabled, -EIO if the QCU queue is
 * disabled on 5211+.
 *
 * NOTE: Must be called after setting up tx control descriptor for that
 * queue (see below).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		/* Read-modify-write AR5K_CR: set TXE, clear TXD for
		 * the register half that matches the queue type */
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
				AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);		/* flush the write */
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
177 | ||
/**
 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if queue number is out of range or inactive.
 */
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	/* NOTE(review): the initial value 40 is never read — both uses
	 * below reassign i first (and the 5210 path never touches it) */
	unsigned int i = 40;
	u32 tx_queue, pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* XXX Fix me...
			 * NOTE(review): AR5K_CR_TXD1 & ~AR5K_CR_TXD1 is
			 * always 0, so this OR is a no-op — presumably
			 * ~AR5K_CR_TXE1 was intended; left as-is since it
			 * is flagged XXX and untested on real hw */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);		/* flush the write */
	} else {

		/*
		 * Enable DCU early termination to quickly
		 * flush any pending frames from QCU
		 */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Wait for queue to stop (up to 1000 x 100us) */
		for (i = 1000; i > 0 &&
		(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
		i--)
			udelay(100);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"queue %i didn't stop !\n", queue);

		/* Check for pending frames */
		i = 1000;
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		    pending) {
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
						AR5K_TSF_L32_5211) >> 10,
						AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(400);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 100;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			if (pending)
				ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
					"quiet mechanism didn't work q:%i !\n",
					queue);
		}

		/*
		 * Disable DCU early termination
		 */
		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending) {
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tx dma didn't stop (q:%i, frm:%i) !\n",
				queue, pending);
			return -EBUSY;
		}
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}
319 | ||
14fae2d4 | 320 | /** |
c47faa36 NK |
321 | * ath5k_hw_stop_beacon_queue() - Stop beacon queue |
322 | * @ah: The &struct ath5k_hw | |
323 | * @queue: The queue number | |
14fae2d4 NK |
324 | * |
325 | * Returns -EIO if queue didn't stop | |
326 | */ | |
c47faa36 NK |
327 | int |
328 | ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) | |
14fae2d4 NK |
329 | { |
330 | int ret; | |
331 | ret = ath5k_hw_stop_tx_dma(ah, queue); | |
332 | if (ret) { | |
e0d687bd | 333 | ATH5K_DBG(ah, ATH5K_DEBUG_DMA, |
14fae2d4 NK |
334 | "beacon queue didn't stop !\n"); |
335 | return -EIO; | |
336 | } | |
337 | return 0; | |
338 | } | |
339 | ||
c6e387a2 | 340 | /** |
c47faa36 | 341 | * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue |
c6e387a2 NK |
342 | * @ah: The &struct ath5k_hw |
343 | * @queue: The hw queue number | |
344 | * | |
345 | * Get TX descriptor's address for a specific queue. For 5210 we ignore | |
346 | * the queue number and use tx queue type since we only have 2 queues. | |
347 | * We use TXDP0 for normal data queue and TXDP1 for beacon queue. | |
348 | * For newer chips with QCU/DCU we just read the corresponding TXDP register. | |
349 | * | |
350 | * XXX: Is TXDP read and clear ? | |
351 | */ | |
c47faa36 NK |
352 | u32 |
353 | ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) | |
c6e387a2 NK |
354 | { |
355 | u16 tx_reg; | |
356 | ||
c6e387a2 NK |
357 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); |
358 | ||
359 | /* | |
360 | * Get the transmit queue descriptor pointer from the selected queue | |
361 | */ | |
362 | /*5210 doesn't have QCU*/ | |
363 | if (ah->ah_version == AR5K_AR5210) { | |
364 | switch (ah->ah_txq[queue].tqi_type) { | |
365 | case AR5K_TX_QUEUE_DATA: | |
366 | tx_reg = AR5K_NOQCU_TXDP0; | |
367 | break; | |
368 | case AR5K_TX_QUEUE_BEACON: | |
369 | case AR5K_TX_QUEUE_CAB: | |
370 | tx_reg = AR5K_NOQCU_TXDP1; | |
371 | break; | |
372 | default: | |
373 | return 0xffffffff; | |
374 | } | |
375 | } else { | |
376 | tx_reg = AR5K_QUEUE_TXDP(queue); | |
377 | } | |
378 | ||
379 | return ath5k_hw_reg_read(ah, tx_reg); | |
380 | } | |
381 | ||
382 | /** | |
c47faa36 | 383 | * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue |
c6e387a2 NK |
384 | * @ah: The &struct ath5k_hw |
385 | * @queue: The hw queue number | |
c47faa36 | 386 | * @phys_addr: The physical address |
c6e387a2 NK |
387 | * |
388 | * Set TX descriptor's address for a specific queue. For 5210 we ignore | |
389 | * the queue number and we use tx queue type since we only have 2 queues | |
390 | * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue. | |
391 | * For newer chips with QCU/DCU we just set the corresponding TXDP register. | |
392 | * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still | |
393 | * active. | |
394 | */ | |
c47faa36 NK |
395 | int |
396 | ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) | |
c6e387a2 NK |
397 | { |
398 | u16 tx_reg; | |
399 | ||
c6e387a2 NK |
400 | AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); |
401 | ||
402 | /* | |
403 | * Set the transmit queue descriptor pointer register by type | |
404 | * on 5210 | |
405 | */ | |
406 | if (ah->ah_version == AR5K_AR5210) { | |
407 | switch (ah->ah_txq[queue].tqi_type) { | |
408 | case AR5K_TX_QUEUE_DATA: | |
409 | tx_reg = AR5K_NOQCU_TXDP0; | |
410 | break; | |
411 | case AR5K_TX_QUEUE_BEACON: | |
412 | case AR5K_TX_QUEUE_CAB: | |
413 | tx_reg = AR5K_NOQCU_TXDP1; | |
414 | break; | |
415 | default: | |
416 | return -EINVAL; | |
417 | } | |
418 | } else { | |
419 | /* | |
420 | * Set the transmit queue descriptor pointer for | |
421 | * the selected queue on QCU for 5211+ | |
422 | * (this won't work if the queue is still active) | |
423 | */ | |
424 | if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) | |
425 | return -EIO; | |
426 | ||
427 | tx_reg = AR5K_QUEUE_TXDP(queue); | |
428 | } | |
429 | ||
430 | /* Set descriptor pointer */ | |
431 | ath5k_hw_reg_write(ah, phys_addr, tx_reg); | |
432 | ||
433 | return 0; | |
434 | } | |
435 | ||
436 | /** | |
c47faa36 | 437 | * ath5k_hw_update_tx_triglevel() - Update tx trigger level |
c6e387a2 NK |
438 | * @ah: The &struct ath5k_hw |
439 | * @increase: Flag to force increase of trigger level | |
440 | * | |
441 | * This function increases/decreases the tx trigger level for the tx fifo | |
442 | * buffer (aka FIFO threshold) that is used to indicate when PCU flushes | |
a180a130 | 443 | * the buffer and transmits its data. Lowering this results sending small |
c6e387a2 | 444 | * frames more quickly but can lead to tx underruns, raising it a lot can |
c47faa36 NK |
445 | * result other problems. Right now we start with the lowest possible |
446 | * (64Bytes) and if we get tx underrun we increase it using the increase | |
447 | * flag. Returns -EIO if we have reached maximum/minimum. | |
c6e387a2 NK |
448 | * |
449 | * XXX: Link this with tx DMA size ? | |
c47faa36 | 450 | * XXX2: Use it to save interrupts ? |
c6e387a2 | 451 | */ |
c47faa36 NK |
452 | int |
453 | ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) | |
c6e387a2 NK |
454 | { |
455 | u32 trigger_level, imr; | |
456 | int ret = -EIO; | |
457 | ||
c6e387a2 NK |
458 | /* |
459 | * Disable interrupts by setting the mask | |
460 | */ | |
461 | imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL); | |
462 | ||
463 | trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG), | |
464 | AR5K_TXCFG_TXFULL); | |
465 | ||
466 | if (!increase) { | |
467 | if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES) | |
468 | goto done; | |
469 | } else | |
470 | trigger_level += | |
471 | ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2); | |
472 | ||
473 | /* | |
474 | * Update trigger level on success | |
475 | */ | |
476 | if (ah->ah_version == AR5K_AR5210) | |
477 | ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL); | |
478 | else | |
479 | AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, | |
480 | AR5K_TXCFG_TXFULL, trigger_level); | |
481 | ||
482 | ret = 0; | |
483 | ||
484 | done: | |
485 | /* | |
486 | * Restore interrupt mask | |
487 | */ | |
488 | ath5k_hw_set_imr(ah, imr); | |
489 | ||
490 | return ret; | |
491 | } | |
492 | ||
9320b5c4 | 493 | |
c6e387a2 NK |
494 | /*******************\ |
495 | * Interrupt masking * | |
496 | \*******************/ | |
497 | ||
498 | /** | |
c47faa36 | 499 | * ath5k_hw_is_intr_pending() - Check if we have pending interrupts |
c6e387a2 NK |
500 | * @ah: The &struct ath5k_hw |
501 | * | |
502 | * Check if we have pending interrupts to process. Returns 1 if we | |
503 | * have pending interrupts and 0 if we haven't. | |
504 | */ | |
c47faa36 NK |
505 | bool |
506 | ath5k_hw_is_intr_pending(struct ath5k_hw *ah) | |
c6e387a2 | 507 | { |
509a106e | 508 | return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; |
c6e387a2 NK |
509 | } |
510 | ||
/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The @struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 *	interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading Primary Interrupt Status Register. Returns an
 * abstract interrupt status mask which is mostly ISR with some uncommon bits
 * being mapped on some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * Returns 0 on success, -ENODEV when the status register reads back as
 * AR5K_INT_NOCARD (card presumably gone).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
 * function gets called are cleared on return.
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * Read interrupt status from Primary Interrupt
	 * Register.
	 *
	 * Note: PISR/SISR Not available on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Handle INT_FATAL: any of the host-interface error
		 * bits maps to the abstract fatal interrupt */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
						| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
		interrupt_mask &= ~AR5K_INT_BMISS;
		 */

		data = isr;
	} else {
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR and SISRs... */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * PISR holds the logical OR of interrupt bits
		 * from SISR registers:
		 *
		 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
		 *			per-queue bits on SISR0
		 *
		 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
		 *			per-queue bits on SISR1
		 *
		 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
		 *
		 * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
		 *
		 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
		 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
		 *		(and TSFOOR ?) bits on SISR2
		 *
		 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
		 *			QCBRURN per-queue bits on SISR3
		 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
		 *
		 * If we clean these bits on PISR we 'll also clear all
		 * related bits from SISRs, e.g. if we write the TXOK bit on
		 * PISR we 'll clean all TXOK bits from SISR0 so if a new TXOK
		 * interrupt got fired for another queue while we were reading
		 * the interrupt registers and we write back the TXOK bit on
		 * PISR we 'll lose it. So make sure that we don't write back
		 * on PISR any bits that come from SISRs. Clearing them from
		 * SISRs will also clear PISR so no need to worry here.
		 */

		/* XXX: There seems to be an issue on some cards
		 * with tx interrupt flags not being updated
		 * on PISR despite that all Tx interrupt bits
		 * are cleared on SISRs. Since we handle all
		 * Tx queues all together it shouldn't be an
		 * issue if we clear Tx interrupt flags also
		 * on PISR to avoid that.
		 */
		pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
					(pisr & AR5K_INT_TX_ALL);

		/*
		 * Write to clear them...
		 * Note: This means that each bit we write back
		 * to the registers will get cleared, leaving the
		 * rest unaffected. So this won't affect new interrupts
		 * we didn't catch while reading/processing, we 'll get
		 * them next time get_isr gets called.
		 */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
		/* Flush previous write */
		ath5k_hw_reg_read(ah, AR5K_PISR);

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;


		/* We treat TXOK,TXDESC, TXERR and TXEOL
		 * the same way (schedule the tx tasklet)
		 * so we track them all together per queue */
		if (pisr & AR5K_ISR_TXOK)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXOK);

		if (pisr & AR5K_ISR_TXDESC)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXDESC);

		if (pisr & AR5K_ISR_TXERR)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXERR);

		if (pisr & AR5K_ISR_TXEOL)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXEOL);

		/* Currently this is not much useful since we treat
		 * all queues the same way if we get a TXURN (update
		 * tx trigger level) but we might need it later on*/
		if (pisr & AR5K_ISR_TXURN)
			ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
						AR5K_SISR2_QCU_TXURN);

		/* Misc Beacon related interrupts */

		/* For AR5211 */
		if (pisr & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* For AR5212+ */
		if (pisr & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		/* Below interrupts are unlikely to happen */

		/* HIU = Host Interface Unit (PCI etc)
		 * Can be one of MCABT, SSERR, DPERR from SISR2 */
		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*Beacon Not Ready*/
		if (unlikely(pisr & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* A queue got CBR overrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRORN);
		}

		/* A queue got CBR underrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
						AR5K_SISR3_QCBRURN);
		}

		/* A queue got triggered */
		if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
						AR5K_SISR4_QTRIG);
		}

		data = pisr;
	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}
744 | ||
/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing
 * Interrupt Mask Register.
 *
 * Returns the previously active mask so callers can save/restore it
 * (see ath5k_hw_update_tx_triglevel()).
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);	/* flush the write */
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		/* Fatal interrupt abstraction for 5211+ */
		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* Misc beacon related interrupts */

		/* TIM needs a bit in both the primary mask (PIMR)
		 * and the secondary mask (SIMR2), hence two checks */
		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		/*Beacon Not Ready*/
		/* NOTE(review): this ORs the abstract AR5K_INT_BNR flag
		 * into int_mask rather than an AR5K_IMR_* bit — confirm
		 * the two values coincide in reg.h */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		/* Note: Per queue interrupt masks
		 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* Fatal interrupt abstraction for 5210 */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		/* Only common interrupts left for 5210 (no SIMRs) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);	/* flush the write */
	}

	return old_mask;
}
839 | ||
9320b5c4 NK |
840 | |
841 | /********************\ | |
842 | Init/Stop functions | |
843 | \********************/ | |
844 | ||
845 | /** | |
c47faa36 | 846 | * ath5k_hw_dma_init() - Initialize DMA unit |
9320b5c4 NK |
847 | * @ah: The &struct ath5k_hw |
848 | * | |
849 | * Set DMA size and pre-enable interrupts | |
850 | * (driver handles tx/rx buffer setup and | |
851 | * dma start/stop) | |
852 | * | |
853 | * XXX: Save/restore RXDP/TXDP registers ? | |
854 | */ | |
c47faa36 NK |
855 | void |
856 | ath5k_hw_dma_init(struct ath5k_hw *ah) | |
9320b5c4 NK |
857 | { |
858 | /* | |
859 | * Set Rx/Tx DMA Configuration | |
860 | * | |
861 | * Set standard DMA size (128). Note that | |
862 | * a DMA size of 512 causes rx overruns and tx errors | |
863 | * on pci-e cards (tested on 5424 but since rx overruns | |
864 | * also occur on 5416/5418 with madwifi we set 128 | |
865 | * for all PCI-E cards to be safe). | |
866 | * | |
867 | * XXX: need to check 5210 for this | |
6a2a0e73 | 868 | * TODO: Check out tx trigger level, it's always 64 on dumps but I |
9320b5c4 NK |
869 | * guess we can tweak it and see how it goes ;-) |
870 | */ | |
871 | if (ah->ah_version != AR5K_AR5210) { | |
872 | AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, | |
873 | AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B); | |
874 | AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, | |
875 | AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B); | |
876 | } | |
877 | ||
878 | /* Pre-enable interrupts on 5211/5212*/ | |
879 | if (ah->ah_version != AR5K_AR5210) | |
880 | ath5k_hw_set_imr(ah, ah->ah_imr); | |
881 | ||
882 | } | |
d41174fa NK |
883 | |
/**
 * ath5k_hw_dma_stop() - stop DMA unit
 * @ah: The &struct ath5k_hw
 *
 * Stop tx/rx DMA and interrupts. Returns
 * -EBUSY if tx or rx dma failed to stop.
 *
 * XXX: Sometimes DMA unit hangs and we have
 * stuck frames on tx queues, only a reset
 * can fix that.
 */
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
	int i, qmax, err;
	err = 0;

	/* Disable interrupts */
	ath5k_hw_set_imr(ah, 0);

	/* Stop rx dma */
	err = ath5k_hw_stop_rx_dma(ah);
	if (err)
		return err;

	/* Clear any pending interrupts
	 * and disable tx dma */
	if (ah->ah_version != AR5K_AR5210) {
		/* Write-to-clear every PISR bit */
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
		qmax = AR5K_NUM_TX_QUEUES;
	} else {
		/* PISR/SISR Not available on 5210;
		 * reading ISR is how status is consumed there */
		ath5k_hw_reg_read(ah, AR5K_ISR);
		qmax = AR5K_NUM_TX_QUEUES_NOQCU;
	}

	/* Stop every tx queue, tolerating inactive ones */
	for (i = 0; i < qmax; i++) {
		err = ath5k_hw_stop_tx_dma(ah, i);
		/* -EINVAL -> queue inactive */
		if (err && err != -EINVAL)
			return err;
	}

	return 0;
}