* this round of polling
*/
if (rx_work) {
+ unsigned long flags;
+
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
- local_irq_disable();
+ local_irq_save(flags);
cpw16_f(IntrMask, cp_intr_mask);
__netif_rx_complete(dev);
- local_irq_enable();
+ local_irq_restore(flags);
return 0; /* done */
}
}
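
Judging by the cp_* register helpers, this first hunk is from the 8139cp driver. The conversion matters because ->poll() can be entered with interrupts already disabled (netpoll does exactly this), and an unconditional local_irq_enable() would turn them back on behind the caller's back. A minimal sketch of the difference, illustrative only and not part of the patch:

	local_irq_disable();
	/* ... critical section ... */
	local_irq_enable();	/* BUG if the caller entered with IRQs off:
				 * this re-enables them unconditionally */

versus the saved-state form used by the patch:

	unsigned long flags;

	local_irq_save(flags);		/* record the caller's IRQ state */
	/* ... critical section ... */
	local_irq_restore(flags);	/* put it back exactly as found */
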
if (bp->istat & ISTAT_ERRORS) {
- spin_lock_irq(&bp->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, 1);
netif_wake_queue(bp->dev);
- spin_unlock_irq(&bp->lock);
+ spin_unlock_irqrestore(&bp->lock, flags);
done = 1;
}
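
The b44 hunk applies the same fix to the spinlock variants: spin_unlock_irq() re-enables interrupts unconditionally, while spin_lock_irqsave()/spin_unlock_irqrestore() record and restore whatever IRQ state was in effect on entry. A minimal sketch of the pattern, reusing bp->lock from the hunk above:

	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);	  /* IRQs off, prior state saved */
	/* ... halt and reinitialize the chip ... */
	spin_unlock_irqrestore(&bp->lock, flags); /* prior state restored */
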
int pkts, limit = min(*budget, dev->quota);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ unsigned long flags;
pkts = nv_rx_process(dev, limit);
if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
}
if (pkts < limit) {
netif_rx_complete(dev);
/* re-enable receive interrupts */
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
np->irqmask |= NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
return 0;
} else {
/* used up our quantum, so reschedule */
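
This hunk is from forcedeth (the nv_* helpers). Note that a single flags variable serves both critical sections; that is safe because flags only carries state between one matching save/restore pair and is overwritten by the next spin_lock_irqsave(). The shape, as an illustrative sketch:

	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	/* ... rearm the OOM refill timer ... */
	spin_unlock_irqrestore(&np->lock, flags);

	/* flags is dead here and may be reused */

	spin_lock_irqsave(&np->lock, flags);
	/* ... re-enable RX interrupts in the hardware mask ... */
	spin_unlock_irqrestore(&np->lock, flags);
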
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
+ unsigned long flags;
int to_do = min(dev->quota, *budget);
int work_done = 0;
if (work_done >= to_do)
return 1; /* not done */
- spin_lock_irq(&hw->hw_lock);
+ spin_lock_irqsave(&hw->hw_lock, flags);
__netif_rx_complete(dev);
hw->intr_mask |= irqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
- spin_unlock_irq(&hw->hw_lock);
+ spin_unlock_irqrestore(&hw->hw_lock, flags);
return 0;
}
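
The final hunk is from skge; the skge_read32() after the mask write flushes the posted PCI write and is untouched by the patch. All four drivers share the same motivation: ->poll() normally runs from softirq context with interrupts enabled, but netpoll may invoke it with interrupts disabled. A rough sketch of such a caller, with a hypothetical wrapper name, shows why the unconditional-enable forms break:

	/* Hypothetical illustration of a netpoll-style caller. */
	static void poll_with_irqs_off(struct net_device *dev)
	{
		unsigned long flags;
		int budget = 16;

		local_irq_save(flags);	 /* IRQs are now off */
		dev->poll(dev, &budget); /* a handler using spin_unlock_irq()
					  * would re-enable IRQs right here */
		local_irq_restore(flags);
	}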