/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the
 *   allocator's empty list and the driver tries to claim the pre-allocated
 *   buffers and add them to iwl->rxq->rx_free. If it fails - it continues to
 *   claim them until ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is posted
 *   with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
 *   supplied). The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

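/*
 * Worked example (illustrative only, using the 256-entry legacy queue):
 * after init READ = 0 and WRITE = 255, which is the "empty" state
 * WRITE = READ - 1 described above. The firmware fills slot 0 and advances
 * READ to 1; the driver may then process slot 0, re-arm it with a fresh
 * page, and advance WRITE, always staying behind READ. WRITE = READ is the
 * "full" state defined above.
 */
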
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

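/*
 * Worked example (illustrative only): with queue_size = 256,
 * read = 5, write = 5 gives (5 - 5 - 1) & 255 = 255 free slots, while
 * read = 5, write = 4 gives (5 - 4 - 1) & 255 = 0 free slots. The bitwise
 * AND implements modulo 256 even when (read - write - 1) is negative,
 * which plain '%' on a signed int would not (in C it truncates toward 0).
 */
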
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

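/*
 * Note (illustrative): because only dma_addr >> 8 is kept, receive buffers
 * must be 256-byte aligned (page allocations are), e.g. a buffer at DMA
 * address 0x12345600 is stored in the RBD as 0x123456.
 */
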
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

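/*
 * For example (illustrative): with rxq->write = 13, write_actual becomes 8,
 * so the device is only told about buffers up to index 7; buffers 8..12 are
 * announced once write crosses 16. Updating the HW pointer in multiples of
 * 8 batches the (potentially wake-inducing) register writes.
 */
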
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		bd[rxq->write].type_n_size =
			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}
}

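/*
 * Layout note (informational): on 22560+ a free-table entry is a full
 * iwl_rx_transfer_desc carrying type/size, the 64-bit DMA address and the
 * buffer id; on earlier multi-queue devices it is a single __le64 where the
 * low 12 bits hold the vid and the upper bits the page DMA address - hence
 * the DMA_BIT_MASK(12) check in iwl_pcie_rxmq_restock() below.
 */
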
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* The first 12 bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}

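/*
 * Note (illustrative): rx_page_order follows the configured RB size, e.g.
 * with 4 KB pages an 8 KB receive buffer uses order 1, so alloc_pages()
 * returns two contiguous pages and __GFP_COMP marks them as one compound
 * page for correct reference counting when the page is handed upstream.
 */
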
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
					PAGE_SIZE << trans_pcie->rx_page_order,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

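/*
 * Flow sketch (illustrative): a queue posts a request - atomic_inc of
 * req_pending plus queue_work - once it has handed 2 empty RBDs to the
 * allocator (see iwl_pcie_rx_reuse_rbd() below); this worker turns each
 * pending request into 8 freshly mapped pages on rbd_allocated and bumps
 * req_ready; the queue later claims a ready batch of 8 through
 * iwl_pcie_rx_allocator_get().
 */
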
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

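/*
 * Summary (informational): a free-table slot is a full transfer descriptor
 * on 22560+, an 8-byte __le64 (DMA address | vid) on other multi-queue
 * devices, and a 4-byte __le32 (DMA address >> 8) on single-queue devices -
 * matching the three restock paths above.
 */
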
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  use_rx_td ? sizeof(__le16) :
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev,
					  use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;
	/*
	 * W/A for a 22560 step-Z0 bug: the CR tail value must be non-zero.
	 * TODO: remove this when we stop supporting Z0.
	 */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}

int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -EINVAL;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			return ret;
	}
	return 0;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *   the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but needed for W/A 9000 integrated A-step
	 * bug where shadow registers are not in the retention list and their
	 * value is lost when NIC powers down
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}

977 | ||
ff932f61 | 978 | void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) |
26d535ae | 979 | { |
96a6497b | 980 | lockdep_assert_held(&rxq->lock); |
26d535ae | 981 | |
96a6497b SS |
982 | INIT_LIST_HEAD(&rxq->rx_free); |
983 | INIT_LIST_HEAD(&rxq->rx_used); | |
984 | rxq->free_count = 0; | |
985 | rxq->used_count = 0; | |
26d535ae SS |
986 | } |
987 | ||
ff932f61 | 988 | int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) |
bce97731 SS |
989 | { |
990 | WARN_ON(1); | |
991 | return 0; | |
992 | } | |
993 | ||
int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
			MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}

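/*
 * Sizing example (illustrative): on a multi-queue device with 4 RX queues,
 * the allocator keeps an initial pool of
 * 4 * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) = 4 * 6 = 24 RBDs, covering
 * the worst case of every queue having posted a request with only 2 of its
 * 8 RBDs returned. vid is stored as i + 1 so that 0 can never name a valid
 * buffer in the hardware tables (see iwl_pcie_get_rxb() below).
 */
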
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}

static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}

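/*
 * Worked example (illustrative, with RX_CLAIM_REQ_ALLOC = 8 and
 * RX_POST_REQ_ALLOC = 2 as described in the theory of operation above):
 * requests are posted when used_count hits 2, 10, 18, ... - i.e. once per
 * 8 recycled RBDs, but already after the first 2, so the allocator can
 * work while the remaining 6 are still in flight.
 */
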
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency,
				  int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			rxcb.status = rxq->cd[i].status;

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_pcie_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

1b4bbe8b SS |
1337 | static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, |
1338 | struct iwl_rxq *rxq, int i) | |
1339 | { | |
1340 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1341 | struct iwl_rx_mem_buffer *rxb; | |
1342 | u16 vid; | |
1343 | ||
1344 | if (!trans->cfg->mq_rx_supported) { | |
1345 | rxb = rxq->queue[i]; | |
1346 | rxq->queue[i] = NULL; | |
1347 | return rxb; | |
1348 | } | |
1349 | ||
1350 | /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ | |
1351 | if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) | |
1352 | vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; | |
1353 | else | |
1354 | vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; | |
1355 | ||
1356 | if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) | |
1357 | goto out_err; | |
1358 | ||
1359 | rxb = trans_pcie->global_table[vid - 1]; | |
1360 | if (rxb->invalid) | |
1361 | goto out_err; | |
1362 | ||
1363 | if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) | |
1364 | rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; | |
1365 | ||
1366 | rxb->invalid = true; | |
1367 | ||
1368 | return rxb; | |
1369 | ||
1370 | out_err: | |
1371 | WARN(1, "Invalid rxb from HW %u\n", (u32)vid); | |
1372 | iwl_force_nmi(trans); | |
1373 | return NULL; | |
1374 | } | |
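A worked example may help here: the vid is a 12-bit, 1-based index into trans_pcie->global_table, with 0 reserved to mean "no buffer". A minimal standalone sketch of the same decode, using hypothetical names and sizes in place of the driver's structures:

#include <stdint.h>
#include <stddef.h>

#define RBD_VID_MASK	0x0FFF	/* only the low 12 bits carry the vid */
#define TABLE_SIZE	1024	/* hypothetical stand-in for ARRAY_SIZE(global_table) */

static void *global_table[TABLE_SIZE];	/* hypothetical stand-in */

/* Decode a used_bd entry into its table slot; NULL means invalid */
static void *vid_to_rxb(uint32_t used_bd_entry)
{
	uint16_t vid = used_bd_entry & RBD_VID_MASK;

	/* vid is 1-based: 0 means "no buffer", out of range is a HW bug */
	if (!vid || vid > TABLE_SIZE)
		return NULL;
	return global_table[vid - 1];
}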
1375 | ||
990aa6d7 EG |
1376 | /* |
1377 | * iwl_pcie_rx_handle - Main entry function for receiving responses from fw | |
ab697a9f | 1378 | */ |
2e5d4a8f | 1379 | static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) |
ab697a9f | 1380 | { |
df2f3216 | 1381 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
2e5d4a8f | 1382 | struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; |
d56daea4 | 1383 | u32 r, i, count = 0; |
26d535ae | 1384 | bool emergency = false; |
ab697a9f | 1385 | |
f14d6b39 JB |
1386 | restart: |
1387 | spin_lock(&rxq->lock); | |
ab697a9f EG |
1388 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1389 | * buffer that the driver may process (last buffer filled by ucode). */ | |
0307c839 | 1390 | r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; |
ab697a9f EG |
1391 | i = rxq->read; |
1392 | ||
5eae443e SS |
1393 | /* W/A 9000 device step A0 wrap-around bug */ |
1394 | r &= (rxq->queue_size - 1); | |
1395 | ||
ab697a9f EG |
1396 | /* Rx interrupt, but nothing sent from uCode */ |
1397 | if (i == r) | |
5eae443e | 1398 | IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); |
ab697a9f | 1399 | |
ab697a9f | 1400 | while (i != r) { |
868a1e86 | 1401 | struct iwl_rb_allocator *rba = &trans_pcie->rba; |
48a2d66f | 1402 | struct iwl_rx_mem_buffer *rxb; |
868a1e86 ST |
1403 | /* number of RBDs still waiting for page allocation */ |
1404 | u32 rb_pending_alloc = | |
1405 | atomic_read(&trans_pcie->rba.req_pending) * | |
1406 | RX_CLAIM_REQ_ALLOC; | |
1407 | ||
1408 | if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && | |
1409 | !emergency)) { | |
1410 | iwl_pcie_rx_move_to_allocator(rxq, rba); | |
26d535ae | 1411 | emergency = true; |
868a1e86 | 1412 | } |
26d535ae | 1413 | |
1b4bbe8b SS |
1414 | rxb = iwl_pcie_get_rxb(trans, rxq, i); |
1415 | if (!rxb) | |
1416 | goto out; | |
ab697a9f | 1417 | |
5eae443e | 1418 | IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); |
7891965d | 1419 | iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); |
ab697a9f | 1420 | |
96a6497b | 1421 | i = (i + 1) & (rxq->queue_size - 1); |
26d535ae | 1422 | |
d56daea4 SS |
1423 | /* |
1424 | * If RX_CLAIM_REQ_ALLOC rx buffers have been released, try to |
1425 | * claim the pre-allocated buffers from the allocator. If they |
1426 | * are not ready yet, we will try to reclaim them next time. |
1427 | * There is no need to reschedule work - the allocator exits |
1428 | * only on success. |
1429 | */ | |
1430 | if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) | |
1431 | iwl_pcie_rx_allocator_get(trans, rxq); | |
1432 | ||
1433 | if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { | |
d56daea4 | 1434 | /* Add the remaining empty RBDs for allocator use */ |
868a1e86 | 1435 | iwl_pcie_rx_move_to_allocator(rxq, rba); |
d56daea4 | 1436 | } else if (emergency) { |
255ba065 | 1437 | count++; |
26d535ae | 1438 | if (count == 8) { |
255ba065 | 1439 | count = 0; |
868a1e86 | 1440 | if (rb_pending_alloc < rxq->queue_size / 3) |
26d535ae | 1441 | emergency = false; |
e0e168dc GG |
1442 | |
1443 | rxq->read = i; | |
26d535ae | 1444 | spin_unlock(&rxq->lock); |
78485054 | 1445 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
96a6497b | 1446 | iwl_pcie_rxq_restock(trans, rxq); |
e0e168dc GG |
1447 | goto restart; |
1448 | } | |
26d535ae | 1449 | } |
ab697a9f | 1450 | } |
5eae443e | 1451 | out: |
ab697a9f EG |
1452 | /* Backtrack one entry */ |
1453 | rxq->read = i; | |
0307c839 GBA |
1454 | /* update cr tail with the rxq read pointer */ |
1455 | if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) | |
1456 | *rxq->cr_tail = cpu_to_le16(r); | |
f14d6b39 JB |
1457 | spin_unlock(&rxq->lock); |
1458 | ||
26d535ae SS |
1459 | /* |
1460 | * Handle a case where, in emergency, there are some unallocated RBDs. |
1461 | * Those RBDs are on the used list, but are not tracked by the queue's |
1462 | * used_count, which only counts allocator-owned RBDs. |
1463 | * Unallocated emergency RBDs must be allocated on exit; otherwise, |
1464 | * when called again, the function may no longer be in emergency mode |
1465 | * and they will be handed to the allocator with no tracking in the RBD |
1466 | * allocator counters, which will lead to them never being claimed back |
1467 | * by the queue. |
1468 | * By allocating them here, they are now on the queue's free list and |
1469 | * will be restocked by the next call of iwl_pcie_rxq_restock. |
1470 | */ | |
1471 | if (unlikely(emergency && count)) | |
78485054 | 1472 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); |
255ba065 | 1473 | |
bce97731 SS |
1474 | if (rxq->napi.poll) |
1475 | napi_gro_flush(&rxq->napi, false); | |
e0e168dc GG |
1476 | |
1477 | iwl_pcie_rxq_restock(trans, rxq); | |
ab697a9f EG |
1478 | } |
1479 | ||
2e5d4a8f HD |
1480 | static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) |
1481 | { | |
1482 | u8 queue = entry->entry; | |
1483 | struct msix_entry *entries = entry - queue; | |
1484 | ||
1485 | return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); | |
1486 | } | |
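The pointer arithmetic above is worth unpacking: entry->entry holds the index i of this entry within msix_entries[], so "entry - queue" rewinds the pointer to &msix_entries[0], and container_of() then recovers the enclosing iwl_trans_pcie. A minimal standalone sketch of the same trick, with hypothetical reduced structures:

#include <stddef.h>

struct msix_entry { unsigned int entry; };	/* index within the array */

struct dev_priv {				/* hypothetical container */
	int some_state;
	struct msix_entry msix_entries[16];
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dev_priv *entry_to_priv(struct msix_entry *entry)
{
	/* rewind from &msix_entries[i] back to &msix_entries[0] ... */
	struct msix_entry *first = entry - entry->entry;

	/* ... then step back from the member to the enclosing struct */
	return container_of(first, struct dev_priv, msix_entries[0]);
}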
1487 | ||
2e5d4a8f HD |
1488 | /* |
1489 | * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw |
1490 | * This interrupt handler should be used with RSS queues only. |
1491 | */ | |
1492 | irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) | |
1493 | { | |
1494 | struct msix_entry *entry = dev_id; | |
1495 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
1496 | struct iwl_trans *trans = trans_pcie->trans; | |
1497 | ||
c42ff65d JB |
1498 | trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); |
1499 | ||
5eae443e SS |
1500 | if (WARN_ON(entry->entry >= trans->num_rx_queues)) |
1501 | return IRQ_NONE; | |
1502 | ||
2e5d4a8f HD |
1503 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1504 | ||
1505 | local_bh_disable(); | |
1506 | iwl_pcie_rx_handle(trans, entry->entry); | |
1507 | local_bh_enable(); | |
1508 | ||
1509 | iwl_pcie_clear_irq(trans, entry); | |
1510 | ||
1511 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1512 | ||
1513 | return IRQ_HANDLED; | |
1514 | } | |
1515 | ||
990aa6d7 EG |
1516 | /* |
1517 | * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card | |
7ff94706 | 1518 | */ |
990aa6d7 | 1519 | static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) |
7ff94706 | 1520 | { |
f946b529 | 1521 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1103323c | 1522 | int i; |
f946b529 | 1523 | |
7ff94706 | 1524 | /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ |
035f7ff2 | 1525 | if (trans->cfg->internal_wimax_coex && |
95411d04 | 1526 | !trans->cfg->apmg_not_supported && |
1042db2a | 1527 | (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & |
20d3b647 | 1528 | APMS_CLK_VAL_MRB_FUNC_MODE) || |
1042db2a | 1529 | (iwl_read_prph(trans, APMG_PS_CTRL_REG) & |
20d3b647 | 1530 | APMG_PS_CTRL_VAL_RESET_REQ))) { |
eb7ff77e | 1531 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
8a8bbdb4 | 1532 | iwl_op_mode_wimax_active(trans->op_mode); |
f946b529 | 1533 | wake_up(&trans_pcie->wait_command_queue); |
7ff94706 EG |
1534 | return; |
1535 | } | |
1536 | ||
13a3a390 SS |
1537 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { |
1538 | if (!trans_pcie->txq[i]) | |
1539 | continue; | |
b2a3b1c1 | 1540 | del_timer(&trans_pcie->txq[i]->stuck_timer); |
13a3a390 | 1541 | } |
1103323c | 1542 | |
7d75f32e EG |
1543 | /* The STATUS_FW_ERROR bit is set in this function. This must happen |
1544 | * before we wake up the command caller, to ensure a proper cleanup. */ | |
1545 | iwl_trans_fw_error(trans); | |
1546 | ||
2a988e98 AN |
1547 | clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); |
1548 | wake_up(&trans_pcie->wait_command_queue); | |
7ff94706 EG |
1549 | } |
1550 | ||
7117c000 | 1551 | static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) |
fc84472b | 1552 | { |
fc84472b EG |
1553 | u32 inta; |
1554 | ||
46e81af9 | 1555 | lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); |
fc84472b EG |
1556 | |
1557 | trace_iwlwifi_dev_irq(trans->dev); | |
1558 | ||
1559 | /* Discover which interrupts are active/pending */ | |
1560 | inta = iwl_read32(trans, CSR_INT); | |
1561 | ||
fc84472b | 1562 | /* the thread will service interrupts and re-enable them */ |
fe523dc9 | 1563 | return inta; |
fc84472b EG |
1564 | } |
1565 | ||
1566 | /* a device (PCI-E) page is 4096 bytes long */ | |
1567 | #define ICT_SHIFT 12 | |
1568 | #define ICT_SIZE (1 << ICT_SHIFT) | |
1569 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | |
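The sizing arithmetic here is simple but worth spelling out: ICT_SIZE is 1 << 12 = 4096 bytes (one PCI-E page), each entry is a u32 (4 bytes), so ICT_COUNT is 1024 entries. The read loop below wraps the index with "& (ICT_COUNT - 1)", which only works because ICT_COUNT is a power of two. A small standalone sketch checking exactly that:

#include <assert.h>
#include <stdint.h>

#define ICT_SHIFT 12
#define ICT_SIZE  (1 << ICT_SHIFT)		/* 4096 bytes, one page */
#define ICT_COUNT (ICT_SIZE / sizeof(uint32_t))	/* 1024 entries */

int main(void)
{
	unsigned int index = ICT_COUNT - 1;	/* last valid slot */

	/* the wrap mask relies on ICT_COUNT being a power of two */
	assert((ICT_COUNT & (ICT_COUNT - 1)) == 0);
	index = (index + 1) & (ICT_COUNT - 1);	/* wraps back to 0 */
	assert(index == 0);
	return 0;
}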
1570 | ||
1571 | /* Interrupt handler using the ICT table. With this mechanism the driver |
1572 | * stops reading the INTA register to discover the device's interrupts, |
1573 | * since reading that register is expensive. Instead, the device writes |
1574 | * the interrupt causes into the ICT DRAM table, increments its index and |
1575 | * fires an interrupt to the driver. The driver ORs all ICT table entries |
1576 | * from the current index up to the first entry with a 0 value; the result |
1577 | * is the interrupt to service. The driver then zeroes the entries and updates the index. |
1578 | */ | |
7117c000 | 1579 | static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) |
fc84472b EG |
1580 | { |
1581 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
fc84472b EG |
1582 | u32 inta; |
1583 | u32 val = 0; | |
1584 | u32 read; | |
1585 | ||
fc84472b EG |
1586 | trace_iwlwifi_dev_irq(trans->dev); |
1587 | ||
1588 | /* Ignore interrupt if there's nothing in NIC to service. | |
1589 | * This may be due to IRQ shared with another device, | |
1590 | * or due to sporadic interrupts thrown from our NIC. */ | |
1591 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1592 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); | |
7ba1faa4 EG |
1593 | if (!read) |
1594 | return 0; | |
fc84472b EG |
1595 | |
1596 | /* | |
1597 | * Collect all entries up to the first 0, starting from ict_index; | |
1598 | * note we already read at ict_index. | |
1599 | */ | |
1600 | do { | |
1601 | val |= read; | |
1602 | IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", | |
1603 | trans_pcie->ict_index, read); | |
1604 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | |
1605 | trans_pcie->ict_index = | |
83f32a4b | 1606 | ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); |
fc84472b EG |
1607 | |
1608 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | |
1609 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | |
1610 | read); | |
1611 | } while (read); | |
1612 | ||
1613 | /* We should not get this value, just ignore it. */ | |
1614 | if (val == 0xffffffff) | |
1615 | val = 0; | |
1616 | ||
1617 | /* | |
1618 | * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit |
1619 | * (bit 15 before shifting it to 31) to clear when using interrupt |
1620 | * coalescing. Fortunately, bits 18 and 19 stay set when this happens, |
1621 | * so we use them to decide on the real state of the Rx bit. |
1622 | * In other words, bit 15 is set if bit 18 or bit 19 is set. |
1623 | */ | |
1624 | if (val & 0xC0000) | |
1625 | val |= 0x8000; | |
1626 | ||
1627 | inta = (0xff & val) | ((0xff00 & val) << 16); | |
fe523dc9 | 1628 | return inta; |
fc84472b EG |
1629 | } |
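The final remap line deserves a worked example: the ICT entry stores the low byte of CSR_INT in bits 0-7 and the top byte in bits 8-15, so "(0xff & val) | ((0xff00 & val) << 16)" keeps the low byte in place and moves the second byte up to bits 24-31; this is why the Rx bit travels from bit 15 to bit 31 in the w/a comment above. A standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Expand a compressed ICT value back to CSR_INT bit positions */
static uint32_t ict_to_inta(uint32_t val)
{
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	assert(ict_to_inta(0x0042) == 0x00000042);	/* bits 0-7 stay put */
	assert(ict_to_inta(0x8042) == 0x80000042);	/* bit 15 -> bit 31 */
	return 0;
}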
1630 | ||
fa4de7f7 | 1631 | void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) |
3a6e168b JB |
1632 | { |
1633 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | |
1634 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
326477e4 | 1635 | bool hw_rfkill, prev, report; |
3a6e168b JB |
1636 | |
1637 | mutex_lock(&trans_pcie->mutex); | |
326477e4 | 1638 | prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); |
3a6e168b | 1639 | hw_rfkill = iwl_is_rfkill_set(trans); |
326477e4 JB |
1640 | if (hw_rfkill) { |
1641 | set_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
1642 | set_bit(STATUS_RFKILL_HW, &trans->status); | |
1643 | } | |
1644 | if (trans_pcie->opmode_down) | |
1645 | report = hw_rfkill; | |
1646 | else | |
1647 | report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
3a6e168b JB |
1648 | |
1649 | IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", | |
1650 | hw_rfkill ? "disable radio" : "enable radio"); | |
1651 | ||
1652 | isr_stats->rfkill++; | |
1653 | ||
326477e4 JB |
1654 | if (prev != report) |
1655 | iwl_trans_pcie_rf_kill(trans, report); | |
3a6e168b JB |
1656 | mutex_unlock(&trans_pcie->mutex); |
1657 | ||
1658 | if (hw_rfkill) { | |
1659 | if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, | |
1660 | &trans->status)) | |
1661 | IWL_DEBUG_RF_KILL(trans, | |
1662 | "Rfkill while SYNC HCMD in flight\n"); | |
1663 | wake_up(&trans_pcie->wait_command_queue); | |
1664 | } else { | |
326477e4 JB |
1665 | clear_bit(STATUS_RFKILL_HW, &trans->status); |
1666 | if (trans_pcie->opmode_down) | |
1667 | clear_bit(STATUS_RFKILL_OPMODE, &trans->status); | |
3a6e168b JB |
1668 | } |
1669 | } | |
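The prev/report bookkeeping above reduces to one decision: while the op mode is down, the reported state simply tracks the hardware switch; once the op mode is up, the OPMODE bit is sticky and only the op mode's own shutdown path clears it. A minimal sketch of just that selection, with plain bools standing in for the status bits:

#include <stdbool.h>

/* Mirrors the report selection in iwl_pcie_handle_rfkill_irq() above */
static bool rfkill_report(bool hw_rfkill, bool opmode_bit, bool opmode_down)
{
	/* a hardware assertion always sets the sticky OPMODE bit first */
	bool sticky = opmode_bit || hw_rfkill;

	return opmode_down ? hw_rfkill : sticky;
}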
1670 | ||
2bfb5092 | 1671 | irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) |
ab697a9f | 1672 | { |
2bfb5092 | 1673 | struct iwl_trans *trans = dev_id; |
20d3b647 JB |
1674 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1675 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | |
ab697a9f EG |
1676 | u32 inta = 0; |
1677 | u32 handled = 0; | |
ab697a9f | 1678 | |
2bfb5092 JB |
1679 | lock_map_acquire(&trans->sync_cmd_lockdep_map); |
1680 | ||
7b70bd63 | 1681 | spin_lock(&trans_pcie->irq_lock); |
ab697a9f | 1682 | |
0fec9542 EG |
1683 | /* DRAM interrupt table not set yet; |
1684 | * use the legacy interrupt path. |
1685 | */ | |
1686 | if (likely(trans_pcie->use_ict)) | |
7117c000 | 1687 | inta = iwl_pcie_int_cause_ict(trans); |
0fec9542 | 1688 | else |
7117c000 | 1689 | inta = iwl_pcie_int_cause_non_ict(trans); |
0fec9542 | 1690 | |
7ba1faa4 EG |
1691 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
1692 | IWL_DEBUG_ISR(trans, | |
1693 | "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", | |
1694 | inta, trans_pcie->inta_mask, | |
1695 | iwl_read32(trans, CSR_INT_MASK), | |
1696 | iwl_read32(trans, CSR_FH_INT_STATUS)); | |
1697 | if (inta & (~trans_pcie->inta_mask)) | |
1698 | IWL_DEBUG_ISR(trans, | |
1699 | "We got a masked interrupt (0x%08x)\n", | |
1700 | inta & (~trans_pcie->inta_mask)); | |
1701 | } | |
1702 | ||
1703 | inta &= trans_pcie->inta_mask; | |
1704 | ||
1705 | /* | |
1706 | * Ignore interrupt if there's nothing in NIC to service. | |
1707 | * This may be due to IRQ shared with another device, | |
1708 | * or due to sporadic interrupts thrown from our NIC. | |
1709 | */ | |
7117c000 | 1710 | if (unlikely(!inta)) { |
7ba1faa4 EG |
1711 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); |
1712 | /* | |
1713 | * Re-enable interrupts here since we don't | |
1714 | * have anything to service | |
1715 | */ | |
1716 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
f16c3ebf | 1717 | _iwl_enable_interrupts(trans); |
7b70bd63 | 1718 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 EG |
1719 | lock_map_release(&trans->sync_cmd_lockdep_map); |
1720 | return IRQ_NONE; | |
1721 | } | |
1722 | ||
7ba1faa4 EG |
1723 | if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { |
1724 | /* | |
1725 | * Hardware disappeared. It might have | |
1726 | * already raised an interrupt. | |
1727 | */ | |
1728 | IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); | |
7b70bd63 | 1729 | spin_unlock(&trans_pcie->irq_lock); |
7117c000 | 1730 | goto out; |
a0f337cc EG |
1731 | } |
1732 | ||
ab697a9f EG |
1733 | /* Ack/clear/reset pending uCode interrupts. |
1734 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | |
1735 | */ | |
1736 | /* There is a hardware bug in the interrupt mask function whereby some |
1737 | * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if |
1738 | * they are disabled in the CSR_INT_MASK register. Furthermore, the |
1739 | * ICT interrupt handling mechanism has another bug that might cause |
1740 | * these unmasked interrupts to go undetected. We work around the |
1741 | * hardware bugs here by ACKing all the possible interrupts so that |
1742 | * interrupt coalescing can still be achieved. |
1743 | */ | |
7117c000 | 1744 | iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); |
ab697a9f | 1745 | |
51cd53ad | 1746 | if (iwl_have_debug_level(IWL_DL_ISR)) |
0ca24daf | 1747 | IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", |
51cd53ad | 1748 | inta, iwl_read32(trans, CSR_INT_MASK)); |
ab697a9f | 1749 | |
7b70bd63 | 1750 | spin_unlock(&trans_pcie->irq_lock); |
b49ba04a | 1751 | |
ab697a9f EG |
1752 | /* Now service all interrupt bits discovered above. */ |
1753 | if (inta & CSR_INT_BIT_HW_ERR) { | |
0c325769 | 1754 | IWL_ERR(trans, "Hardware error detected. Restarting.\n"); |
ab697a9f EG |
1755 | |
1756 | /* Tell the device to stop sending interrupts */ | |
0c325769 | 1757 | iwl_disable_interrupts(trans); |
ab697a9f | 1758 | |
1f7b6172 | 1759 | isr_stats->hw++; |
990aa6d7 | 1760 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1761 | |
1762 | handled |= CSR_INT_BIT_HW_ERR; | |
1763 | ||
2bfb5092 | 1764 | goto out; |
ab697a9f EG |
1765 | } |
1766 | ||
a8bceb39 | 1767 | if (iwl_have_debug_level(IWL_DL_ISR)) { |
ab697a9f EG |
1768 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ |
1769 | if (inta & CSR_INT_BIT_SCD) { | |
51cd53ad JB |
1770 | IWL_DEBUG_ISR(trans, |
1771 | "Scheduler finished to transmit the frame/frames.\n"); | |
1f7b6172 | 1772 | isr_stats->sch++; |
ab697a9f EG |
1773 | } |
1774 | ||
1775 | /* Alive notification via Rx interrupt will do the real work */ | |
1776 | if (inta & CSR_INT_BIT_ALIVE) { | |
0c325769 | 1777 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); |
1f7b6172 | 1778 | isr_stats->alive++; |
eda50cde SS |
1779 | if (trans->cfg->gen2) { |
1780 | /* | |
1781 | * We can restock, since firmware configured | |
1782 | * the RFH | |
1783 | */ | |
1784 | iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); | |
1785 | } | |
ab697a9f EG |
1786 | } |
1787 | } | |
51cd53ad | 1788 | |
ab697a9f EG |
1789 | /* Safely ignore these bits for debug checks below */ |
1790 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | |
1791 | ||
1792 | /* HW RF KILL switch toggled */ | |
1793 | if (inta & CSR_INT_BIT_RF_KILL) { | |
3a6e168b | 1794 | iwl_pcie_handle_rfkill_irq(trans); |
ab697a9f EG |
1795 | handled |= CSR_INT_BIT_RF_KILL; |
1796 | } | |
1797 | ||
1798 | /* Chip got too hot and stopped itself */ | |
1799 | if (inta & CSR_INT_BIT_CT_KILL) { | |
0c325769 | 1800 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); |
1f7b6172 | 1801 | isr_stats->ctkill++; |
ab697a9f EG |
1802 | handled |= CSR_INT_BIT_CT_KILL; |
1803 | } | |
1804 | ||
1805 | /* Error detected by uCode */ | |
1806 | if (inta & CSR_INT_BIT_SW_ERR) { | |
0c325769 | 1807 | IWL_ERR(trans, "Microcode SW error detected. " |
ab697a9f | 1808 | "Restarting 0x%X.\n", inta); |
1f7b6172 | 1809 | isr_stats->sw++; |
990aa6d7 | 1810 | iwl_pcie_irq_handle_error(trans); |
ab697a9f EG |
1811 | handled |= CSR_INT_BIT_SW_ERR; |
1812 | } | |
1813 | ||
1814 | /* uCode wakes up after power-down sleep */ | |
1815 | if (inta & CSR_INT_BIT_WAKEUP) { | |
0c325769 | 1816 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
5d63f926 | 1817 | iwl_pcie_rxq_check_wrptr(trans); |
ea68f460 | 1818 | iwl_pcie_txq_check_wrptrs(trans); |
ab697a9f | 1819 | |
1f7b6172 | 1820 | isr_stats->wakeup++; |
ab697a9f EG |
1821 | |
1822 | handled |= CSR_INT_BIT_WAKEUP; | |
1823 | } | |
1824 | ||
1825 | /* All uCode command responses, including Tx command responses, | |
1826 | * Rx "responses" (frame-received notification), and other | |
1827 | * notifications from uCode come through here. */ |
1828 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | | |
20d3b647 | 1829 | CSR_INT_BIT_RX_PERIODIC)) { |
0c325769 | 1830 | IWL_DEBUG_ISR(trans, "Rx interrupt\n"); |
ab697a9f EG |
1831 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { |
1832 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | |
1042db2a | 1833 | iwl_write32(trans, CSR_FH_INT_STATUS, |
ab697a9f EG |
1834 | CSR_FH_INT_RX_MASK); |
1835 | } | |
1836 | if (inta & CSR_INT_BIT_RX_PERIODIC) { | |
1837 | handled |= CSR_INT_BIT_RX_PERIODIC; | |
1042db2a | 1838 | iwl_write32(trans, |
0c325769 | 1839 | CSR_INT, CSR_INT_BIT_RX_PERIODIC); |
ab697a9f EG |
1840 | } |
1841 | /* Sending an RX interrupt requires many steps to be done in |
1842 | * the device: |
1843 | * 1- write interrupt to current index in ICT table. |
1844 | * 2- dma RX frame. |
1845 | * 3- update RX shared data to indicate last write index. |
1846 | * 4- send interrupt. |
1847 | * This could lead to an RX race: the driver could receive an RX |
1848 | * interrupt before the shared data changes reflect it; the |
1849 | * periodic interrupt will detect any dangling Rx activity. |
1850 | */ | |
1851 | ||
1852 | /* Disable periodic interrupt; we use it as just a one-shot. */ | |
1042db2a | 1853 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
ab697a9f | 1854 | CSR_INT_PERIODIC_DIS); |
6379103e | 1855 | |
ab697a9f EG |
1856 | /* |
1857 | * Enable the periodic interrupt in 8 msec only if we received |
1858 | * a real RX interrupt (instead of just the periodic int), to catch |
1859 | * any dangling Rx interrupt. If it was just the periodic | |
1860 | * interrupt, there was no dangling Rx activity, and no need | |
1861 | * to extend the periodic interrupt; one-shot is enough. | |
1862 | */ | |
1863 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) | |
1042db2a | 1864 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
20d3b647 | 1865 | CSR_INT_PERIODIC_ENA); |
ab697a9f | 1866 | |
1f7b6172 | 1867 | isr_stats->rx++; |
f14d6b39 JB |
1868 | |
1869 | local_bh_disable(); | |
2e5d4a8f | 1870 | iwl_pcie_rx_handle(trans, 0); |
f14d6b39 | 1871 | local_bh_enable(); |
ab697a9f EG |
1872 | } |
1873 | ||
1874 | /* This "Tx" DMA channel is used only for loading uCode */ | |
1875 | if (inta & CSR_INT_BIT_FH_TX) { | |
1042db2a | 1876 | iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); |
0c325769 | 1877 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); |
1f7b6172 | 1878 | isr_stats->tx++; |
ab697a9f EG |
1879 | handled |= CSR_INT_BIT_FH_TX; |
1880 | /* Wake up uCode load routine, now that load is complete */ | |
13df1aab JB |
1881 | trans_pcie->ucode_write_complete = true; |
1882 | wake_up(&trans_pcie->ucode_write_waitq); | |
ab697a9f EG |
1883 | } |
1884 | ||
1885 | if (inta & ~handled) { | |
0c325769 | 1886 | IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); |
1f7b6172 | 1887 | isr_stats->unhandled++; |
ab697a9f EG |
1888 | } |
1889 | ||
0c325769 EG |
1890 | if (inta & ~(trans_pcie->inta_mask)) { |
1891 | IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", | |
1892 | inta & ~trans_pcie->inta_mask); | |
ab697a9f EG |
1893 | } |
1894 | ||
f16c3ebf EG |
1895 | spin_lock(&trans_pcie->irq_lock); |
1896 | /* only Re-enable all interrupt if disabled by irq */ | |
1897 | if (test_bit(STATUS_INT_ENABLED, &trans->status)) | |
1898 | _iwl_enable_interrupts(trans); | |
a6bd005f | 1899 | /* we are loading the firmware, enable FH_TX interrupt only */ |
f16c3ebf | 1900 | else if (handled & CSR_INT_BIT_FH_TX) |
a6bd005f | 1901 | iwl_enable_fw_load_int(trans); |
ab697a9f | 1902 | /* Re-enable RF_KILL if it occurred */ |
8722c899 SG |
1903 | else if (handled & CSR_INT_BIT_RF_KILL) |
1904 | iwl_enable_rfkill_int(trans); | |
f16c3ebf | 1905 | spin_unlock(&trans_pcie->irq_lock); |
2bfb5092 JB |
1906 | |
1907 | out: | |
1908 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
1909 | return IRQ_HANDLED; | |
ab697a9f EG |
1910 | } |
1911 | ||
1a361cd8 EG |
1912 | /****************************************************************************** |
1913 | * | |
1914 | * ICT functions | |
1915 | * | |
1916 | ******************************************************************************/ | |
10667136 | 1917 | |
1a361cd8 | 1918 | /* Free dram table */ |
990aa6d7 | 1919 | void iwl_pcie_free_ict(struct iwl_trans *trans) |
1a361cd8 | 1920 | { |
20d3b647 | 1921 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
0c325769 | 1922 | |
10667136 | 1923 | if (trans_pcie->ict_tbl) { |
1042db2a | 1924 | dma_free_coherent(trans->dev, ICT_SIZE, |
10667136 | 1925 | trans_pcie->ict_tbl, |
0c325769 | 1926 | trans_pcie->ict_tbl_dma); |
10667136 JB |
1927 | trans_pcie->ict_tbl = NULL; |
1928 | trans_pcie->ict_tbl_dma = 0; | |
1a361cd8 EG |
1929 | } |
1930 | } | |
1931 | ||
10667136 JB |
1932 | /* |
1933 | * Allocate the shared DRAM table; it is an aligned memory |
1934 | * block of ICT_SIZE bytes. |
1a361cd8 EG |
1935 | * Also reset all data related to the ICT table interrupt. |
1936 | */ | |
990aa6d7 | 1937 | int iwl_pcie_alloc_ict(struct iwl_trans *trans) |
1a361cd8 | 1938 | { |
20d3b647 | 1939 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1940 | |
10667136 | 1941 | trans_pcie->ict_tbl = |
750afb08 LC |
1942 | dma_alloc_coherent(trans->dev, ICT_SIZE, |
1943 | &trans_pcie->ict_tbl_dma, GFP_KERNEL); | |
10667136 | 1944 | if (!trans_pcie->ict_tbl) |
1a361cd8 EG |
1945 | return -ENOMEM; |
1946 | ||
10667136 JB |
1947 | /* just an API sanity check ... it is guaranteed to be aligned */ |
1948 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | |
990aa6d7 | 1949 | iwl_pcie_free_ict(trans); |
10667136 JB |
1950 | return -EINVAL; |
1951 | } | |
1a361cd8 | 1952 | |
1a361cd8 EG |
1953 | return 0; |
1954 | } | |
1955 | ||
1956 | /* The device is going up; inform it that we are using the ICT interrupt |
1957 | * table, and tell the driver to start using ICT interrupts. |
1958 | */ | |
990aa6d7 | 1959 | void iwl_pcie_reset_ict(struct iwl_trans *trans) |
1a361cd8 | 1960 | { |
20d3b647 | 1961 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1962 | u32 val; |
1a361cd8 | 1963 | |
10667136 | 1964 | if (!trans_pcie->ict_tbl) |
ed6a3803 | 1965 | return; |
1a361cd8 | 1966 | |
7b70bd63 | 1967 | spin_lock(&trans_pcie->irq_lock); |
f16c3ebf | 1968 | _iwl_disable_interrupts(trans); |
1a361cd8 | 1969 | |
10667136 | 1970 | memset(trans_pcie->ict_tbl, 0, ICT_SIZE); |
1a361cd8 | 1971 | |
10667136 | 1972 | val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; |
1a361cd8 | 1973 | |
18f5a374 EP |
1974 | val |= CSR_DRAM_INT_TBL_ENABLE | |
1975 | CSR_DRAM_INIT_TBL_WRAP_CHECK | | |
1976 | CSR_DRAM_INIT_TBL_WRITE_POINTER; | |
1a361cd8 | 1977 | |
10667136 | 1978 | IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); |
1a361cd8 | 1979 | |
1042db2a | 1980 | iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); |
0c325769 EG |
1981 | trans_pcie->use_ict = true; |
1982 | trans_pcie->ict_index = 0; | |
1042db2a | 1983 | iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); |
f16c3ebf | 1984 | _iwl_enable_interrupts(trans); |
7b70bd63 | 1985 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1986 | } |
1987 | ||
1988 | /* The device is going down; disable ICT interrupt usage */ |
990aa6d7 | 1989 | void iwl_pcie_disable_ict(struct iwl_trans *trans) |
1a361cd8 | 1990 | { |
20d3b647 | 1991 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1a361cd8 | 1992 | |
7b70bd63 | 1993 | spin_lock(&trans_pcie->irq_lock); |
0c325769 | 1994 | trans_pcie->use_ict = false; |
7b70bd63 | 1995 | spin_unlock(&trans_pcie->irq_lock); |
1a361cd8 EG |
1996 | } |
1997 | ||
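Taken together, the ICT entry points above imply a simple lifecycle. The sketch below would compile in this file, since all four functions are declared in internal.h, but the real call sites live elsewhere in the PCIe transport code; treat it as an illustration of the intended call order only:

static int __maybe_unused ict_lifecycle_sketch(struct iwl_trans *trans)
{
	int ret = iwl_pcie_alloc_ict(trans);	/* once, at probe time */

	if (ret)
		return ret;			/* -ENOMEM or -EINVAL */

	iwl_pcie_reset_ict(trans);	/* each time the device comes up */
	/* ... interrupts now arrive via iwl_pcie_int_cause_ict() ... */
	iwl_pcie_disable_ict(trans);	/* when the device goes down */
	iwl_pcie_free_ict(trans);	/* once, on removal */
	return 0;
}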
85bf9da1 EG |
1998 | irqreturn_t iwl_pcie_isr(int irq, void *data) |
1999 | { | |
2000 | struct iwl_trans *trans = data; | |
2001 | ||
2002 | if (!trans) | |
2003 | return IRQ_NONE; | |
2004 | ||
2005 | /* Disable (but don't clear!) interrupts here to avoid | |
2006 | * back-to-back ISRs and sporadic interrupts from our NIC. | |
2007 | * If we have something to service, the threaded handler will re-enable ints. |
2008 | * If we *don't* have something, we'll re-enable before leaving here. | |
2009 | */ | |
2010 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | |
2011 | ||
a0f337cc | 2012 | return IRQ_WAKE_THREAD; |
85bf9da1 | 2013 | } |
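Returning IRQ_WAKE_THREAD only makes sense as the hard-IRQ half of a threaded handler pair. The driver's actual registration, with its exact flags and name, happens in the transport setup code; the sketch below only illustrates how such a pair is typically wired up with the kernel's request_threaded_irq(), so the flags and name here are assumptions:

static int sketch_register_irq(struct pci_dev *pdev, struct iwl_trans *trans)
{
	/*
	 * The hard handler (iwl_pcie_isr) masks the device and returns
	 * IRQ_WAKE_THREAD; the threaded half (iwl_pcie_irq_handler)
	 * then services the causes and re-enables the interrupts.
	 */
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler, IRQF_SHARED,
				    "iwlwifi", trans);
}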
2e5d4a8f HD |
2014 | |
2015 | irqreturn_t iwl_pcie_msix_isr(int irq, void *data) | |
2016 | { | |
2017 | return IRQ_WAKE_THREAD; | |
2018 | } | |
2019 | ||
2020 | irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) | |
2021 | { | |
2022 | struct msix_entry *entry = dev_id; | |
2023 | struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); | |
2024 | struct iwl_trans *trans = trans_pcie->trans; | |
46167a8f | 2025 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; |
2e5d4a8f HD |
2026 | u32 inta_fh, inta_hw; |
2027 | ||
2028 | lock_map_acquire(&trans->sync_cmd_lockdep_map); | |
2029 | ||
2030 | spin_lock(&trans_pcie->irq_lock); | |
7ef3dd26 HD |
2031 | inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); |
2032 | inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); | |
2e5d4a8f HD |
2033 | /* |
2034 | * Clear the cause registers to avoid handling the same cause twice. |
2035 | */ | |
7ef3dd26 HD |
2036 | iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); |
2037 | iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); | |
2e5d4a8f HD |
2038 | spin_unlock(&trans_pcie->irq_lock); |
2039 | ||
c42ff65d JB |
2040 | trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); |
2041 | ||
2e5d4a8f HD |
2042 | if (unlikely(!(inta_fh | inta_hw))) { |
2043 | IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); | |
2044 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
2045 | return IRQ_NONE; | |
2046 | } | |
2047 | ||
2048 | if (iwl_have_debug_level(IWL_DL_ISR)) | |
2049 | IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", | |
2050 | inta_fh, | |
2051 | iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); | |
2052 | ||
496d83ca HD |
2053 | if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && |
2054 | inta_fh & MSIX_FH_INT_CAUSES_Q0) { | |
2055 | local_bh_disable(); | |
2056 | iwl_pcie_rx_handle(trans, 0); | |
2057 | local_bh_enable(); | |
2058 | } | |
2059 | ||
2060 | if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && | |
2061 | inta_fh & MSIX_FH_INT_CAUSES_Q1) { | |
2062 | local_bh_disable(); | |
2063 | iwl_pcie_rx_handle(trans, 1); | |
2064 | local_bh_enable(); | |
2065 | } | |
2066 | ||
2e5d4a8f HD |
2067 | /* This "Tx" DMA channel is used only for loading uCode */ |
2068 | if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { | |
2069 | IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); | |
2070 | isr_stats->tx++; | |
2071 | /* | |
2072 | * Wake up uCode load routine, | |
2073 | * now that load is complete | |
2074 | */ | |
2075 | trans_pcie->ucode_write_complete = true; | |
2076 | wake_up(&trans_pcie->ucode_write_waitq); | |
2077 | } | |
2078 | ||
2079 | /* Error detected by uCode */ | |
2080 | if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || | |
9b58419e GBA |
2081 | (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) || |
2082 | (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) { | |
2e5d4a8f HD |
2083 | IWL_ERR(trans, |
2084 | "Microcode SW error detected. Restarting 0x%X.\n", | |
2085 | inta_fh); | |
2086 | isr_stats->sw++; | |
2087 | iwl_pcie_irq_handle_error(trans); | |
2088 | } | |
2089 | ||
2090 | /* After checking the FH register, check the HW register */ |
2091 | if (iwl_have_debug_level(IWL_DL_ISR)) | |
2092 | IWL_DEBUG_ISR(trans, | |
2093 | "ISR inta_hw 0x%08x, enabled 0x%08x\n", | |
2094 | inta_hw, | |
2095 | iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); | |
2096 | ||
2097 | /* Alive notification via Rx interrupt will do the real work */ | |
2098 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { | |
2099 | IWL_DEBUG_ISR(trans, "Alive interrupt\n"); | |
2100 | isr_stats->alive++; | |
eda50cde SS |
2101 | if (trans->cfg->gen2) { |
2102 | /* We can restock, since firmware configured the RFH */ | |
2103 | iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); | |
2104 | } | |
2e5d4a8f HD |
2105 | } |
2106 | ||
9b58419e GBA |
2107 | if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 && |
2108 | inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { | |
2109 | /* Reflect IML transfer status */ | |
2110 | int res = iwl_read32(trans, CSR_IML_RESP_ADDR); | |
2111 | ||
2112 | IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); | |
2113 | if (res == IWL_IMAGE_RESP_FAIL) { | |
2114 | isr_stats->sw++; | |
2115 | iwl_pcie_irq_handle_error(trans); | |
2116 | } | |
2117 | } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { | |
2118 | /* uCode wakes up after power-down sleep */ | |
2e5d4a8f HD |
2119 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
2120 | iwl_pcie_rxq_check_wrptr(trans); | |
2121 | iwl_pcie_txq_check_wrptrs(trans); | |
2122 | ||
2123 | isr_stats->wakeup++; | |
2124 | } | |
2125 | ||
2126 | /* Chip got too hot and stopped itself */ | |
2127 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { | |
2128 | IWL_ERR(trans, "Microcode CT kill error detected.\n"); | |
2129 | isr_stats->ctkill++; | |
2130 | } | |
2131 | ||
2132 | /* HW RF KILL switch toggled */ | |
3a6e168b JB |
2133 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) |
2134 | iwl_pcie_handle_rfkill_irq(trans); | |
2e5d4a8f HD |
2135 | |
2136 | if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { | |
2137 | IWL_ERR(trans, | |
2138 | "Hardware error detected. Restarting.\n"); | |
2139 | ||
2140 | isr_stats->hw++; | |
2141 | iwl_pcie_irq_handle_error(trans); | |
2142 | } | |
2143 | ||
2144 | iwl_pcie_clear_irq(trans, entry); | |
2145 | ||
2146 | lock_map_release(&trans->sync_cmd_lockdep_map); | |
2147 | ||
2148 | return IRQ_HANDLED; | |
2149 | } |