rmb();
dp = &RX_DESC(mac, n);
+ prefetchw(dp);
macrx = dp->macrx;
if (!(macrx & XCT_MACRX_O))
break;
if (info->dma == dma)
break;
}
+ prefetchw(info);
skb = info->skb;
+ prefetchw(skb);
info->dma = 0;
pci_unmap_single(mac->dma_pdev, dma, skb->len,
PCI_DMA_FROMDEVICE);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, mac->netdev);
-
- if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
+ if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
XCT_MACRX_CSUM_S;
mac->stats.rx_bytes += len;
mac->stats.rx_packets++;
+ skb->protocol = eth_type_trans(skb, mac->netdev);
netif_receive_skb(skb);
dp->ptr = 0;
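
The RX hunks above do three things: prefetchw() pulls the descriptor, its buffer-info entry, and the skb into the cache in exclusive state before they are written; the IPv4-checksum-OK test gains a likely() annotation, treating checksummed IPv4 as the common case; and eth_type_trans(), which reads the Ethernet header out of packet data, moves down next to netif_receive_skb(), presumably so that cache line is touched only once, right before the stack consumes the skb. Below is a minimal userspace sketch of the write-intent prefetch idea, not the driver code itself; the names (ring_desc, ring_clean, DESC_OWNED) are hypothetical, and in the kernel prefetchw() amounts to a write-intent prefetch much like GCC's __builtin_prefetch(ptr, 1):

#include <stddef.h>
#include <stdint.h>

struct ring_desc {
	uint64_t status;	/* hardware ownership bit lives here */
	uint64_t ptr;		/* cleared by software after use */
};

#define DESC_OWNED	(1ULL << 63)

static size_t ring_clean(struct ring_desc *ring, size_t nents,
			 size_t head, size_t limit)
{
	size_t count;

	for (count = 0; count < limit; count++) {
		struct ring_desc *dp = &ring[(head + count) % nents];

		/* dp->ptr is stored to below, so fetch the line in
		 * exclusive state up front instead of paying a
		 * shared-to-exclusive upgrade at the store. */
		__builtin_prefetch(dp, 1);

		if (dp->status & DESC_OWNED)
			break;	/* still owned by hardware */

		dp->ptr = 0;
	}
	return count;
}
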
for (i = start; i < mac->tx->next_to_use; i++) {
dp = &TX_DESC(mac, i);
- if (!dp || (dp->mactx & XCT_MACTX_O))
+ if (unlikely(dp->mactx & XCT_MACTX_O))
break;
count++;
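
The tx-clean hunk drops a test that could never fire (dp comes from &TX_DESC(mac, i) and is never NULL) and annotates the remaining ownership check with unlikely(), telling the compiler to lay the loop out for the common "descriptor already completed" case. These hints are thin wrappers over __builtin_expect, as in include/linux/compiler.h; restated outside the kernel:

/* The double negation normalizes any non-zero expression to 1 so
 * the comparison against the expected value works for arbitrary
 * conditions, not just booleans. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

With unlikely(), the compiler can place the break path out of line so the hot loop body stays fall-through.
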
struct pasemi_mac_txring *txring;
struct pasemi_mac_buffer *info;
struct pas_dma_xct_descr *dp;
- u64 dflags;
+ u64 dflags, mactx, ptr;
dma_addr_t map;
int flags;
if (dma_mapping_error(map))
return NETDEV_TX_BUSY;
+ mactx = dflags | XCT_MACTX_LLEN(skb->len);
+ ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
+
txring = mac->tx;
spin_lock_irqsave(&txring->lock, flags);
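
The tx-start hunks shrink the locked region: the two descriptor words depend only on dflags, skb->len, and the DMA mapping, none of which is shared state, so they are now computed before spin_lock_irqsave(), and the final hunk below reduces the in-lock work to two plain stores. A sketch of the same lock-narrowing pattern, with hypothetical names (txring, tx_post) and purely illustrative field packing:

#include <pthread.h>
#include <stdint.h>

#define RING_SIZE	256

struct txring {
	pthread_spinlock_t lock;
	unsigned int next_to_use;
	uint64_t mactx[RING_SIZE];
	uint64_t ptr[RING_SIZE];
};

static void tx_post(struct txring *r, uint64_t dflags, uint64_t len,
		    uint64_t addr)
{
	/* Build the descriptor words from private data first ... */
	uint64_t mactx = dflags | (len << 48);	/* illustrative packing */
	uint64_t ptr = (len << 48) | addr;

	/* ... so the lock covers only the shared ring update. */
	pthread_spin_lock(&r->lock);
	unsigned int slot = r->next_to_use++ % RING_SIZE;
	r->mactx[slot] = mactx;
	r->ptr[slot] = ptr;
	pthread_spin_unlock(&r->lock);
}
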
}
}
-
dp = &TX_DESC(mac, txring->next_to_use);
info = &TX_DESC_INFO(mac, txring->next_to_use);
- dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
- dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
+ dp->mactx = mactx;
+ dp->ptr = ptr;
info->dma = map;
info->skb = skb;