/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until it succeeds.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum missing RBDs per allocation request (a request is posted with 2
 *   empty RBDs, and there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 *
 */
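/*
 * Note: the "2" and "6"/"8" figures in the description above correspond to
 * the RX_POST_REQ_ALLOC and RX_CLAIM_REQ_ALLOC constants used throughout
 * this file (assumed to come from internal.h): a request is posted to the
 * allocator once 2 RBDs are used, and 8 pre-allocated buffers are claimed
 * back in one batch.
 */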
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
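/*
 * Example: with queue_size = 256, read = 10 and write = 5 the expression
 * above yields (10 - 5 - 1) & 255 = 4 free slots; with read = 5 and
 * write = 10 it yields (5 - 10 - 1) & 255 = 250, i.e. the mask makes the
 * subtraction wrap correctly.
 */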
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
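/*
 * iwl_pcie_dma_addr2rbd_ptr() drops the low 8 bits of the address, i.e.
 * the device stores legacy RB pointers in units of 256 bytes, so the
 * receive buffer DMA addresses handed to it must be 256-byte aligned.
 */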
static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
{
	iwl_write_prph(trans, ofs, val & 0xffffffff);
	iwl_write_prph(trans, ofs + 4, val >> 32);
}
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->mq_rx_supported)
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
			       rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		__le64 *bd = (__le64 *)rxq->bd;

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* 12 first bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
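/*
 * In the multi-queue descriptor written above, the page DMA address is
 * page aligned so its low 12 bits are zero; the buffer's vid is OR'd into
 * those bits and is later echoed back by the HW in the used_bd ring (see
 * iwl_pcie_rx_handle), which is how the driver finds the iwl_rx_mem_buffer
 * that a completed descriptor belongs to.
 */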
/*
 * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
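/*
 * Both restock variants above only bother the device when the write index
 * has crossed a multiple-of-8 boundary: iwl_pcie_rxq_inc_wr_ptr() rounds
 * the index down to a multiple of 8 (write_actual), so comparing
 * write_actual against (write & ~0x7) tells whether a new value would
 * actually be written.
 */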
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxq_mq_restock(trans, rxq);
	else
		iwl_pcie_rxq_sq_restock(trans, rxq);
}
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
				   struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		struct list_head local_allocated;
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		INIT_LIST_HEAD(&local_allocated);

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
						     PAGE_SIZE << trans_pcie->rx_page_order,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		pending--;
		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
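/*
 * The allocator hand-off is tracked by two atomics: req_pending is
 * incremented by iwl_pcie_rx_reuse_rbd() when a request is posted and
 * consumed above via atomic_xchg(), while req_ready is incremented above
 * for every completed batch and consumed by iwl_pcie_rx_allocator_get()
 * below.
 */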
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct device *dev = trans->dev;
	int i;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock_init(&rxq->lock);
		if (trans->cfg->mq_rx_supported)
			rxq->queue_size = MQ_RX_TABLE_SIZE;
		else
			rxq->queue_size = RX_QUEUE_SIZE;

		/*
		 * Allocate the circular buffer of Read Buffer Descriptors
		 * (RBDs)
		 */
		rxq->bd = dma_zalloc_coherent(dev,
					      free_size * rxq->queue_size,
					      &rxq->bd_dma, GFP_KERNEL);
		if (!rxq->bd)
			goto err;

		if (trans->cfg->mq_rx_supported) {
			rxq->used_bd = dma_zalloc_coherent(dev,
							   sizeof(__le32) *
							   rxq->queue_size,
							   &rxq->used_bd_dma,
							   GFP_KERNEL);
			if (!rxq->used_bd)
				goto err;
		}

		/*Allocate the driver's pointer to receive buffer status */
		rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
						   &rxq->rb_stts_dma,
						   GFP_KERNEL);
		if (!rxq->rb_stts)
			goto err;
	}
	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(dev, free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);

		if (rxq->used_bd)
			dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	/* Stop Rx DMA */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
				       (u64)(trans_pcie->rxq[i].bd_dma));
		/* Tell device where to find RBD used table in DRAM */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
				       (u64)(trans_pcie->rxq[i].used_bd_dma));
		/* Tell device where in DRAM to update its Rx status */
		iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
				       trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/* restock default queue */
	iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);

	/*
	 * Enable Rx DMA
	 * Single frame mode
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
		       RFH_DMA_EN_ENABLE_VAL |
		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
		       RFH_RXF_DMA_MIN_RB_4_8 |
		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
		       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B
	 * Default queue is 0
	 */
	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
		       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
	/* Enable the relevant rx queues */
	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	rba->alloc_wq = alloc_workqueue("rb_allocator",
					WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)i;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
	if (trans->cfg->mq_rx_supported) {
		iwl_pcie_rx_mq_hw_init(trans);
	} else {
		iwl_pcie_rxq_sq_restock(trans, def_rxq);
		iwl_pcie_rx_hw_init(trans, def_rxq);
	}

	spin_lock(&def_rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
	spin_unlock(&def_rxq->lock);

	return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
						      sizeof(__le32);
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (rxq->bd)
			dma_free_coherent(trans->dev,
					  free_size * rxq->queue_size,
					  rxq->bd, rxq->bd_dma);
		rxq->bd_dma = 0;
		rxq->bd = NULL;

		if (rxq->rb_stts)
			dma_free_coherent(trans->dev,
					  sizeof(struct iwl_rb_status),
					  rxq->rb_stts, rxq->rb_stts_dma);
		else
			IWL_DEBUG_INFO(trans,
				       "Free rxq->rb_stts which is NULL\n");

		if (rxq->used_bd)
			dma_free_coherent(trans->dev,
					  sizeof(__le32) * rxq->queue_size,
					  rxq->used_bd, rxq->used_bd_dma);
		rxq->used_bd_dma = 0;
		rxq->used_bd = NULL;

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
	 * buffers but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		   Allocator has another 6 from pool for the request completion */
		spin_lock(&rba->lock);
		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans,
			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
			     rxcb._offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
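/*
 * "Emergency" in the handler below means the queue is running out of
 * buffers: it is entered when half of the queue is used (allocator owned)
 * and left again once less than a third is used, with buffers allocated
 * inline (GFP_ATOMIC) instead of waiting for the background allocator.
 */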
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
	u32 r, i, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		if (unlikely(rxq->used_count == rxq->queue_size / 2))
			emergency = true;

		if (trans->cfg->mq_rx_supported) {
			/*
			 * used_bd is 32 bits wide, but only 12 of them are
			 * used to retrieve the vid
			 */
			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;

			if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
				 "Invalid rxb index from HW %u\n", (u32)vid))
				goto out;
			rxb = trans_pcie->global_table[vid];
		} else {
			rxb = rxq->queue[i];
			rxq->queue[i] = NULL;
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			struct iwl_rb_allocator *rba = &trans_pcie->rba;

			/* Add the remaining empty RBDs for allocator use */
			spin_lock(&rba->lock);
			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
			spin_unlock(&rba->lock);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rxq->used_count < rxq->queue_size / 3)
					emergency = false;

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}
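/*
 * The msix_entries array lives inside iwl_trans_pcie and entry->entry is
 * the index of this entry in that array, so stepping back entry->entry
 * elements yields msix_entries[0], from which the helper below recovers
 * the enclosing iwl_trans_pcie.
 */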
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that it's cleared by writing
	 * 1 to the bit.
	 */
	iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	local_bh_disable();
	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);
	local_bh_enable();

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
		del_timer(&trans_pcie->txq[i].stuck_timer);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* interrupt handler using ICT table. With this interrupt, the driver stops
 * using the INTA register to get the device's interrupt, since reading that
 * register is expensive. Instead, the device writes interrupts into an ICT
 * DRAM table, increments its index and fires an interrupt to the driver.
 * The driver ORs all ICT table entries from the current index up to the
 * first entry with a 0 value; the result is the interrupt we need to
 * service. The driver then sets the entries back to 0 and updates the index.
 */
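/*
 * Packing used below: an ICT entry carries CSR_INT bits 0-7 in its low
 * byte and bits 24-31 in its second byte. For example a raw entry value of
 * 0x8000 expands to 0x80000000 via (0xff & val) | ((0xff00 & val) << 16),
 * i.e. bit 15 in the table maps to bit 31 (the Rx bit) in CSR_INT terms.
 */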
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires several steps to be done
		 * in the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an
		 * RX interrupt while the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* we are loading the firmware, enable FH_TX interrupt only */
	if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* only Re-enable all interrupt if disabled by irq */
	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}
/* Device is going up: inform it that it is using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear causes registers to avoid handling the same cause again.
	 */
	iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking FH register check HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
	}

	/* uCode wakes up after power-down sleep */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		mutex_lock(&trans_pcie->mutex);
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
		mutex_unlock(&trans_pcie->mutex);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans->status);
			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
					       &trans->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans->status);
		}
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}