if (!rxq->need_update)
continue;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
rxq->need_update = false;
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
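
Note the conversions to spin_lock_bh() throughout this patch: once RX runs in NAPI (softirq) context, any process-context path that takes rxq->lock, rba->lock, or trans_pcie->irq_lock must disable bottom halves, or the NET_RX softirq could interrupt a lock holder on the same CPU and spin on the lock forever. Inside the poll functions themselves, plain spin_lock() remains correct because they already run with BHs disabled. A minimal sketch of the self-deadlock being closed; the demo_* names are hypothetical, not from iwlwifi:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Runs in softirq context once RX is handled by NAPI. */
    static void demo_napi_poll_path(void)
    {
            spin_lock(&demo_lock);      /* plain lock is fine in BH context */
            /* ... process RX ... */
            spin_unlock(&demo_lock);
    }

    /* Runs in process context (e.g. a work item). */
    static void demo_process_path(void)
    {
            spin_lock_bh(&demo_lock);
            /* With plain spin_lock() here, the NET_RX softirq could
             * interrupt this CPU while the lock is held and spin forever
             * in demo_napi_poll_path(); disabling bottom halves closes
             * that window. */
            spin_unlock_bh(&demo_lock);
    }
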
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
while (rxq->free_count) {
/* Get next free Rx buffer, remove from free list */
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
		       list);
rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
/*
* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8.
*/
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
}
}
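
The restock path batches doorbell writes: the hardware write pointer is only updated once `write` has crossed an 8-RBD boundary since the last update, so under load the device sees one register write per eight buffers. A standalone illustration of that check; need_doorbell() is a hypothetical helper, not driver code:

    #include <stdbool.h>

    static bool need_doorbell(unsigned int write_actual, unsigned int write)
    {
            /* write_actual holds the last pointer handed to the device,
             * always rounded down to a multiple of 8 */
            return write_actual != (write & ~0x7U);
    }

    /*
     * need_doorbell(0, 7)  -> false: still inside the first 8-RBD chunk
     * need_doorbell(0, 8)  -> true:  crossed a boundary, ring the device
     * need_doorbell(8, 15) -> false: boundary not crossed since last ring
     */
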
IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
/* If we were scheduled - there is at least one request */
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* swap out the rba->rbd_empty to a local list */
list_replace_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
while (pending) {
int i;
IWL_DEBUG_TPT(trans,
	      "Got more pending allocation requests = %d\n",
	      pending);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* add the allocated rbds to the allocator allocated list */
list_splice_tail(&local_allocated, &rba->rbd_allocated);
/* get more empty RBDs for current pending requests */
list_splice_tail_init(&rba->rbd_empty, &local_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
atomic_inc(&rba->req_ready);
}
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
/* return unused rbds to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
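
The allocator shows the pattern these _bh locks protect: hold rba->lock only long enough to splice whole lists in or out, then do the expensive page allocation with the lock dropped. A minimal sketch of that splice-out / work-unlocked / splice-back idiom; demo_refill() and its parameters are hypothetical:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static void demo_refill(spinlock_t *lock, struct list_head *shared_empty,
                            struct list_head *shared_done)
    {
            LIST_HEAD(local);

            spin_lock_bh(lock);
            list_replace_init(shared_empty, &local);  /* take the whole list */
            spin_unlock_bh(lock);

            /* expensive page allocation runs with the lock dropped */
            /* ... attach pages to the entries on "local" ... */

            spin_lock_bh(lock);
            list_splice_tail(&local, shared_done);    /* publish the results */
            spin_unlock_bh(lock);
    }
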
rxq->used_count = 0;
}
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+
+static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
- WARN_ON(1);
- return 0;
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, rxq->id);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
+}
+
+static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget)
+{
+ struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+ struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans *trans;
+ int ret;
+
+ trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans = trans_pcie->trans;
+
+ ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+ if (ret < budget) {
+ spin_lock(&trans_pcie->irq_lock);
+ iwl_pcie_clear_irq(trans, 0);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ napi_complete_done(&rxq->napi, ret);
+ }
+
+ return ret;
}
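
All three poll functions follow the standard NAPI contract: consume at most `budget` frames; if fewer were handled, re-arm the interrupt source (legacy INTA enable, a per-queue MSI-X ACK, or an ACK of vector 0 for the shared vector) and call napi_complete_done(). Returning `budget` without completing tells the core to keep polling. A generic skeleton of that contract, with hypothetical demo_* helpers; note the driver re-arms before completing and ignores napi_complete_done()'s return value, while the skeleton shows the more common ordering:

    #include <linux/netdevice.h>

    static int demo_rx_process(struct napi_struct *napi, int budget)
    {
            /* handle up to "budget" frames, return how many were handled */
            return 0;
    }

    static void demo_enable_rx_irq(struct napi_struct *napi)
    {
            /* re-arm the device's RX interrupt source */
    }

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int done = demo_rx_process(napi, budget);

            if (done < budget && napi_complete_done(napi, done))
                    demo_enable_rx_irq(napi);  /* re-arm once polling stops */

            /* done == budget: keep the IRQ masked, the core polls again */
            return done;
    }
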
static int _iwl_pcie_rx_init(struct iwl_trans *trans)
cancel_work_sync(&rba->rx_alloc);
- spin_lock(&rba->lock);
+ spin_lock_bh(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
INIT_LIST_HEAD(&rba->rbd_allocated);
INIT_LIST_HEAD(&rba->rbd_empty);
- spin_unlock(&rba->lock);
+ spin_unlock_bh(&rba->lock);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
- if (!rxq->napi.poll)
+ if (!rxq->napi.poll) {
+ int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
+
+ if (trans_pcie->msix_enabled) {
+ poll = iwl_pcie_napi_poll_msix;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX &&
+ i == 0)
+ poll = iwl_pcie_napi_poll_msix_shared;
+
+ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+ i == 1)
+ poll = iwl_pcie_napi_poll_msix_shared;
+ }
+
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- iwl_pcie_dummy_napi_poll, 64);
+ poll, NAPI_POLL_WEIGHT);
+ napi_enable(&rxq->napi);
+ }
spin_unlock(&rxq->lock);
}
iwl_pcie_free_rxq_dma(trans, rxq);
- if (rxq->napi.poll)
+ if (rxq->napi.poll) {
+ napi_disable(&rxq->napi);
netif_napi_del(&rxq->napi);
+ }
}
kfree(trans_pcie->rx_pool);
kfree(trans_pcie->global_table);
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct napi_struct *napi;
struct iwl_rxq *rxq;
- u32 r, i, count = 0;
+ u32 r, i, count = 0, handled = 0;
bool emergency = false;
if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
- return;
+ return budget;
rxq = &trans_pcie->rxq[queue];
if (i == r)
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
- while (i != r) {
+ while (i != r && ++handled < budget) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
/* number of RBDs still waiting for page allocation */
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- napi = &rxq->napi;
- if (napi->poll) {
- napi_gro_flush(napi, false);
-
- if (napi->rx_count) {
- netif_receive_skb_list(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
- }
- }
-
iwl_pcie_rxq_restock(trans, rxq);
+
+ return handled;
}
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
+ struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
lock_map_acquire(&trans->sync_cmd_lockdep_map);
local_bh_disable();
- iwl_pcie_rx_handle(trans, entry->entry);
+ if (napi_schedule_prep(&rxq->napi))
+ __napi_schedule(&rxq->napi);
+ else
+ iwl_pcie_clear_irq(trans, entry->entry);
local_bh_enable();
- iwl_pcie_clear_irq(trans, entry);
-
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_HANDLED;
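
With NAPI in place, the per-queue MSI-X interrupt handler no longer processes RX inline: it only schedules the queue's NAPI instance. napi_schedule_prep() returns false when the poll is already scheduled or running, in which case the vector is simply ACKed so the device can raise it again. A sketch of that handoff, with hypothetical demo_* names:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static void demo_ack_irq(void)
    {
            /* write the device register that ACKs/unmasks this vector */
    }

    static irqreturn_t demo_rx_irq(int irq, void *dev_id)
    {
            struct napi_struct *napi = dev_id;

            if (napi_schedule_prep(napi))
                    __napi_schedule(napi); /* poll ACKs the vector when done */
            else
                    demo_ack_irq();        /* already scheduled: just ACK */

            return IRQ_HANDLED;
    }
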
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
/* dram interrupt table not set yet,
* use legacy interrupt.
*/
if (test_bit(STATUS_INT_ENABLED, &trans->status))
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
lock_map_release(&trans->sync_cmd_lockdep_map);
return IRQ_NONE;
}
* already raised an interrupt.
*/
IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
goto out;
}
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
isr_stats->rx++;
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
	 inta & ~trans_pcie->inta_mask);
}
- spin_lock(&trans_pcie->irq_lock);
- /* only Re-enable all interrupt if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
- _iwl_enable_interrupts(trans);
- /* we are loading the firmware, enable FH_TX interrupt only */
- else if (handled & CSR_INT_BIT_FH_TX)
- iwl_enable_fw_load_int(trans);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(trans);
- /* Re-enable the ALIVE / Rx interrupt if it occurred */
- else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ if (!polling) {
+ spin_lock_bh(&trans_pcie->irq_lock);
+ /* only Re-enable all interrupt if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ _iwl_enable_interrupts(trans);
+ /* we are loading the firmware, enable FH_TX interrupt only */
+ else if (handled & CSR_INT_BIT_FH_TX)
+ iwl_enable_fw_load_int(trans);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
+ /* Re-enable the ALIVE / Rx interrupt if it occurred */
+ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+ iwl_enable_fw_load_int_ctx_info(trans);
+ spin_unlock_bh(&trans_pcie->irq_lock);
+ }
out:
lock_map_release(&trans->sync_cmd_lockdep_map);
if (!trans_pcie->ict_tbl)
return;
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
_iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
_iwl_enable_interrupts(trans);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
struct iwl_trans *trans = trans_pcie->trans;
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
+ bool polling = false;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock(&trans_pcie->irq_lock);
+ spin_lock_bh(&trans_pcie->irq_lock);
inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
*/
iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
- spin_unlock(&trans_pcie->irq_lock);
+ spin_unlock_bh(&trans_pcie->irq_lock);
trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 0);
+ if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[0].napi);
+ }
local_bh_enable();
}
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
inta_fh & MSIX_FH_INT_CAUSES_Q1) {
local_bh_disable();
- iwl_pcie_rx_handle(trans, 1);
+ if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
+ polling = true;
+ __napi_schedule(&trans_pcie->rxq[1].napi);
+ }
local_bh_enable();
}
wake_up(&trans_pcie->fw_reset_waitq);
}
- iwl_pcie_clear_irq(trans, entry);
+ if (!polling)
+ iwl_pcie_clear_irq(trans, entry->entry);
lock_map_release(&trans->sync_cmd_lockdep_map);