framesize = 0;
#if SYNCLINK_GENERIC_HDLC
{
- struct net_device_stats *stats = hdlc_stats(info->netdev);
- stats->rx_errors++;
- stats->rx_frame_errors++;
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
}
#endif
} else
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
MGSLPC_INFO *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
info->tx_put = info->tx_count = skb->len;
/* update network statistics */
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
/* done with socket buffer, so free it */
dev_kfree_skb(skb);
static void hdlcdev_tx_timeout(struct net_device *dev)
{
MGSLPC_INFO *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_tx_timeout(%s)\n",dev->name);
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
spin_lock_irqsave(&info->lock,flags);
tx_stop(info);
{
struct sk_buff *skb = dev_alloc_skb(size);
struct net_device *dev = info->netdev;
- struct net_device_stats *stats = hdlc_stats(dev);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_rx(%s)\n",dev->name);
if (skb == NULL) {
printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
- memcpy(skb_put(skb, size),buf,size);
+ memcpy(skb_put(skb, size), buf, size);
- skb->protocol = hdlc_type_trans(skb, info->netdev);
+ skb->protocol = hdlc_type_trans(skb, dev);
- stats->rx_packets++;
- stats->rx_bytes += size;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
netif_rx(skb);
- info->netdev->last_rx = jiffies;
+ dev->last_rx = jiffies;
}
/**
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
{
- struct net_device_stats *stats = hdlc_stats(info->netdev);
- stats->rx_errors++;
- stats->rx_frame_errors++;
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
}
#endif
} else
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
/* update network statistics */
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
/* done with socket buffer, so free it */
dev_kfree_skb(skb);
static void hdlcdev_tx_timeout(struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_tx_timeout(%s)\n",dev->name);
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_stop_transmitter(info);
{
struct sk_buff *skb = dev_alloc_skb(size);
struct net_device *dev = info->netdev;
- struct net_device_stats *stats = hdlc_stats(dev);
if (debug_level >= DEBUG_LEVEL_INFO)
- printk("hdlcdev_rx(%s)\n",dev->name);
+ printk("hdlcdev_rx(%s)\n", dev->name);
if (skb == NULL) {
- printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
- stats->rx_dropped++;
+ printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
+ dev->name);
+ dev->stats.rx_dropped++;
return;
}
- memcpy(skb_put(skb, size),buf,size);
+ memcpy(skb_put(skb, size), buf, size);
- skb->protocol = hdlc_type_trans(skb, info->netdev);
+ skb->protocol = hdlc_type_trans(skb, dev);
- stats->rx_packets++;
- stats->rx_bytes += size;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
netif_rx(skb);
- info->netdev->last_rx = jiffies;
+ dev->last_rx = jiffies;
}
/**
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slgt_info *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
DBGINFO(("%s hdlc_xmit\n", dev->name));
tx_load(info, skb->data, skb->len);
/* update network statistics */
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
/* done with socket buffer, so free it */
dev_kfree_skb(skb);
static void hdlcdev_tx_timeout(struct net_device *dev)
{
struct slgt_info *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
spin_lock_irqsave(&info->lock,flags);
tx_stop(info);
{
struct sk_buff *skb = dev_alloc_skb(size);
struct net_device *dev = info->netdev;
- struct net_device_stats *stats = hdlc_stats(dev);
DBGINFO(("%s hdlcdev_rx\n", dev->name));
if (skb == NULL) {
DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
- memcpy(skb_put(skb, size),buf,size);
+ memcpy(skb_put(skb, size), buf, size);
- skb->protocol = hdlc_type_trans(skb, info->netdev);
+ skb->protocol = hdlc_type_trans(skb, dev);
- stats->rx_packets++;
- stats->rx_bytes += size;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
netif_rx(skb);
- info->netdev->last_rx = jiffies;
+ dev->last_rx = jiffies;
}
/**
#if SYNCLINK_GENERIC_HDLC
if (framesize == 0) {
- struct net_device_stats *stats = hdlc_stats(info->netdev);
- stats->rx_errors++;
- stats->rx_frame_errors++;
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
}
#endif
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
SLMP_INFO *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
tx_load_dma_buffer(info, skb->data, skb->len);
/* update network statistics */
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
/* done with socket buffer, so free it */
dev_kfree_skb(skb);
static void hdlcdev_tx_timeout(struct net_device *dev)
{
SLMP_INFO *info = dev_to_port(dev);
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_tx_timeout(%s)\n",dev->name);
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
spin_lock_irqsave(&info->lock,flags);
tx_stop(info);
{
struct sk_buff *skb = dev_alloc_skb(size);
struct net_device *dev = info->netdev;
- struct net_device_stats *stats = hdlc_stats(dev);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_rx(%s)\n",dev->name);
if (skb == NULL) {
- printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
- stats->rx_dropped++;
+ printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
+ dev->name);
+ dev->stats.rx_dropped++;
return;
}
- memcpy(skb_put(skb, size),buf,size);
+ memcpy(skb_put(skb, size), buf, size);
- skb->protocol = hdlc_type_trans(skb, info->netdev);
+ skb->protocol = hdlc_type_trans(skb, dev);
- stats->rx_packets++;
- stats->rx_bytes += size;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += size;
netif_rx(skb);
- info->netdev->last_rx = jiffies;
+ dev->last_rx = jiffies;
}
/**
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
{
- struct net_device_stats *stats = hdlc_stats(info->netdev);
- stats->rx_errors++;
- stats->rx_frame_errors++;
+ info->netdev->stats.rx_errors++;
+ info->netdev->stats.rx_frame_errors++;
}
#endif
}
sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
- struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
- stats->tx_errors++; /* TX Underrun error detected */
- stats->tx_fifo_errors++;
+ /* TX Underrun error detected */
+ port_to_dev(port)->stats.tx_errors++;
+ port_to_dev(port)->stats.tx_fifo_errors++;
}
stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
struct net_device *dev)
{
struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
- struct net_device_stats *stats = hdlc_stats(dev);
struct pci_dev *pdev = dpriv->pci_priv->pdev;
struct sk_buff *skb;
int pkt_len;
pci_unmap_single(pdev, le32_to_cpu(rx_fd->data),
RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
skb_put(skb, pkt_len);
if (netif_running(dev))
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
} else {
if (skb->data[pkt_len] & FrameRdo)
- stats->rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
else if (!(skb->data[pkt_len] | ~FrameCrc))
- stats->rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
else if (!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
- stats->rx_length_errors++;
+ dev->stats.rx_length_errors++;
else
- stats->rx_errors++;
+ dev->stats.rx_errors++;
dev_kfree_skb_irq(skb);
}
refill:
if (state & SccEvt) {
if (state & Alls) {
- struct net_device_stats *stats = hdlc_stats(dev);
struct sk_buff *skb;
struct TxFD *tx_fd;
pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data),
skb->len, PCI_DMA_TODEVICE);
if (tx_fd->state & FrameEnd) {
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
}
dev_kfree_skb_irq(skb);
dpriv->tx_skbuff[cur] = NULL;
}
if (state & Err) {
printk(KERN_INFO "%s: Tx ERR\n", dev->name);
- hdlc_stats(dev)->tx_errors++;
+ dev->stats.tx_errors++;
state &= ~Err;
}
}
if (!(rx_fd->state2 & DataComplete))
break;
if (rx_fd->state2 & FrameAborted) {
- hdlc_stats(dev)->rx_over_errors++;
+ dev->stats.rx_over_errors++;
rx_fd->state1 |= Hold;
rx_fd->state2 = 0x00000000;
rx_fd->end = cpu_to_le32(0xbabeface);
int len, int txpos)
{
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
/*
* Everything is now set, just tell the card to go
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
DMA_OWN | TX_STP | TX_ENP);
- stats->tx_packets++;
- stats->tx_bytes += len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
dev->trans_start = jiffies;
}
int len, struct sk_buff *skb, int rxp)
{
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
int pi;
int rx_status;
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
- stats->rx_packets++;
- stats->rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing the frame up the stack\n");
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
dev->last_rx = jiffies;
}
unsigned char dmabits, int rxp, unsigned short len)
{
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
- /*
+ /*
* Increment the appropriate error counter
*/
- stats->rx_errors++;
+ dev->stats.rx_errors++;
if (dmabits & RX_OFLO) {
- stats->rx_fifo_errors++;
+ dev->stats.rx_fifo_errors++;
dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
card->card_no, port->index, rxp);
}
if (dmabits & RX_CRC) {
- stats->rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits & RX_FRAM) {
- stats->rx_frame_errors++;
+ dev->stats.rx_frame_errors++;
dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits == (RX_STP | RX_ENP)) {
- stats->rx_length_errors++;
+ dev->stats.rx_length_errors++;
dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
len, card->card_no, port->index);
}
unsigned short len;
struct sk_buff *skb;
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
/* Check we have a buffer to process */
pi = port->index;
if ((skb = dev_alloc_skb(len)) == NULL) {
dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
- stats->rx_packets++;
- stats->rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing frame up the stack\n");
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
- if (rx_status == NET_RX_DROP) {
- stats->rx_dropped++;
- }
+ if (rx_status == NET_RX_DROP)
+ dev->stats.rx_dropped++;
dev->last_rx = jiffies;
} else {
card->dma_skb_rx = skb;
struct sk_buff *skb;
unsigned long flags;
struct net_device *dev;
- struct net_device_stats *stats;
/*
* Find a free buffer for the transmit
if (!port->run)
continue;
- dev = port_to_dev(port);
- stats = hdlc_stats(dev);
- while (!
- (FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
- DMA_OWN)
- && !(card->dmatx_in_progress)) {
+ dev = port_to_dev(port);
+ while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
+ DMA_OWN)
+ && !(card->dmatx_in_progress)) {
/*
* There doesn't seem to be a txdone event per-se
* We seem to have to deduce it, by checking the DMA_OWN
txDescrRing[pi][port->txpos].
bits,
DMA_OWN | TX_STP | TX_ENP);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
} else {
/* Or do it through dma */
* always load up the entire packet for DMA.
*/
dbg(DBG_TX, "Tx underflow port %d\n", port->index);
- hdlc_stats(port_to_dev(port))->tx_errors++;
- hdlc_stats(port_to_dev(port))->tx_fifo_errors++;
+ port_to_dev(port)->stats.tx_errors++;
+ port_to_dev(port)->stats.tx_fifo_errors++;
dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
card->card_no, port->index);
break;
{
struct fst_port_info *port;
struct fst_card_info *card;
- struct net_device_stats *stats = hdlc_stats(dev);
port = dev_to_port(dev);
card = port->card;
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
dbg(DBG_ASS, "Tx timeout card %d port %d\n",
card->card_no, port->index);
fst_issue_cmd(port, ABORTTX);
{
struct fst_card_info *card;
struct fst_port_info *port;
- struct net_device_stats *stats = hdlc_stats(dev);
unsigned long flags;
int txq_length;
/* Drop packet with error if we don't have carrier */
if (!netif_carrier_ok(dev)) {
dev_kfree_skb(skb);
- stats->tx_errors++;
- stats->tx_carrier_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_carrier_errors++;
dbg(DBG_ASS,
"Tried to transmit but no carrier on card %d port %d\n",
card->card_no, port->index);
dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
LEN_TX_BUFFER);
dev_kfree_skb(skb);
- stats->tx_errors++;
+ dev->stats.tx_errors++;
return 0;
}
* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
card->card_no, port->index);
return 0;
sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);
if (stat & ST1_UDRN) {
- struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
- stats->tx_errors++; /* TX Underrun error detected */
- stats->tx_fifo_errors++;
+ /* TX Underrun error detected */
+ port_to_dev(port)->stats.tx_errors++;
+ port_to_dev(port)->stats.tx_fifo_errors++;
}
if (stat & ST1_CDCD)
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
{
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
struct sk_buff *skb;
u16 len;
u32 buff;
len = readw(&desc->len);
skb = dev_alloc_skb(len);
if (!skb) {
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
return;
}
printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
dev->last_rx = jiffies;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
+ struct net_device *dev = port_to_dev(port);
u16 dmac = get_dmac_rx(port);
card_t *card = port_to_card(port);
u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
- struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
/* Reset DSR status bits */
sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
DSR_RX(phy_node(port)), card);
if (stat & DSR_BOF)
- stats->rx_over_errors++; /* Dropped one or more frames */
+ dev->stats.rx_over_errors++; /* Dropped one or more frames */
while (1) {
u32 desc_off = desc_offset(port, port->rxin, 0);
if (!(stat & ST_RX_EOM))
port->rxpart = 1; /* partial frame received */
else if ((stat & ST_ERROR_MASK) || port->rxpart) {
- stats->rx_errors++;
- if (stat & ST_RX_OVERRUN) stats->rx_fifo_errors++;
+ dev->stats.rx_errors++;
+ if (stat & ST_RX_OVERRUN)
+ dev->stats.rx_fifo_errors++;
else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
ST_RX_RESBIT)) || port->rxpart)
- stats->rx_frame_errors++;
- else if (stat & ST_RX_CRC) stats->rx_crc_errors++;
+ dev->stats.rx_frame_errors++;
+ else if (stat & ST_RX_CRC)
+ dev->stats.rx_crc_errors++;
if (stat & ST_RX_EOM)
port->rxpart = 0; /* received last fragment */
} else
static inline void sca_tx_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
- struct net_device_stats *stats = hdlc_stats(dev);
u16 dmac = get_dmac_tx(port);
card_t* card = port_to_card(port);
u8 stat;
break; /* Transmitter is/will_be sending this frame */
desc = desc_address(port, port->txlast, 1);
- stats->tx_packets++;
- stats->tx_bytes += readw(&desc->len);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += readw(&desc->len);
writeb(0, &desc->stat); /* Free descriptor */
port->txlast = next_desc(port, port->txlast, 1);
}
static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
{
- return hdlc_stats(dev);
+ return &dev->stats;
}
dev_kfree_skb_any(skb);
return NET_RX_DROP;
- rx_error:
- dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
+rx_error:
+ dev->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
}pvc_device;
struct pvc_desc {
- struct net_device_stats stats;
pvc_device *pvc;
};
return dev->priv;
}
-static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
-{
- return &pvcdev_to_desc(dev)->stats;
-}
-
static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
pvc_device *pvc = state(hdlc)->first_pvc;
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
- struct net_device_stats *stats = pvc_get_stats(dev);
if (pvc->state.active) {
if (dev->type == ARPHRD_ETHER) {
if (skb_tailroom(skb) < pad)
if (pskb_expand_head(skb, 0, pad,
GFP_ATOMIC)) {
- stats->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
}
skb->protocol = __constant_htons(ETH_P_802_3);
}
if (!fr_hard_header(&skb, pvc->dlci)) {
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
if (pvc->state.fecn) /* TX Congestion counter */
- stats->tx_compressed++;
+ dev->stats.tx_compressed++;
skb->dev = pvc->frad;
dev_queue_xmit(skb);
return 0;
}
}
- stats->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
}
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- dev_to_hdlc(frad)->stats.rx_dropped++;
+ frad->stats.rx_dropped++;
return NET_RX_DROP;
}
}
if (dev) {
- struct net_device_stats *stats = pvc_get_stats(dev);
- stats->rx_packets++; /* PVC traffic */
- stats->rx_bytes += skb->len;
+ dev->stats.rx_packets++; /* PVC traffic */
+ dev->stats.rx_bytes += skb->len;
if (pvc->state.becn)
- stats->rx_compressed++;
+ dev->stats.rx_compressed++;
netif_rx(skb);
return NET_RX_SUCCESS;
} else {
}
rx_error:
- dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
+ frad->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
dlci_to_q922(dev->broadcast, dlci);
}
dev->hard_start_xmit = pvc_xmit;
- dev->get_stats = pvc_get_stats;
dev->open = pvc_open;
dev->stop = pvc_close;
dev->do_ioctl = pvc_ioctl;
int len = skb->len;
if (skb_tailroom(skb) < pad)
if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
- hdlc_stats(dev)->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
}
static int x25_rx(struct sk_buff *skb)
{
- struct hdlc_device *hdlc = dev_to_hdlc(skb->dev);
+ struct net_device *dev = skb->dev; /* grab dev first: skb_share_check() may return NULL */
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- hdlc->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
return NET_RX_DROP;
}
if (lapb_data_received(skb->dev, skb) == LAPB_OK)
return NET_RX_SUCCESS;
- hdlc->stats.rx_errors++;
+ dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
static void tx_dma_buf_check(pc300_t *, int);
static void rx_dma_buf_check(pc300_t *, int);
static irqreturn_t cpc_intr(int, void *);
-static struct net_device_stats *cpc_get_stats(struct net_device *);
static int clock_rate_calc(uclong, uclong, int *);
static uclong detect_ram(pc300_t *);
static void plx_init(pc300_t *);
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
- struct net_device_stats *stats = hdlc_stats(dev);
int ch = chan->channel;
unsigned long flags;
ucchar ilar;
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
CPC_LOCK(card, flags);
if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) {
printk("%s: ILAR=0x%x\n", dev->name, ilar);
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
- struct net_device_stats *stats = hdlc_stats(dev);
int ch = chan->channel;
unsigned long flags;
#ifdef PC300_DEBUG_TX
} else if (!netif_carrier_ok(dev)) {
/* DCD must be OFF: drop packet */
dev_kfree_skb(skb);
- stats->tx_errors++;
- stats->tx_carrier_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_carrier_errors++;
return 0;
} else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) {
printk("%s: DCD is OFF. Going administrative down.\n", dev->name);
- stats->tx_errors++;
- stats->tx_carrier_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_carrier_errors++;
dev_kfree_skb(skb);
netif_carrier_off(dev);
CPC_LOCK(card, flags);
// printk("%s: write error. Dropping TX packet.\n", dev->name);
netif_stop_queue(dev);
dev_kfree_skb(skb);
- stats->tx_errors++;
- stats->tx_dropped++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
return 0;
}
#ifdef PC300_DEBUG_TX
pc300dev_t *d = (pc300dev_t *) dev->priv;
pc300ch_t *chan = (pc300ch_t *) d->chan;
pc300_t *card = (pc300_t *) chan->card;
- struct net_device_stats *stats = hdlc_stats(dev);
int ch = chan->channel;
#ifdef PC300_DEBUG_RX
int i;
#endif
if ((skb == NULL) && (rxb > 0)) {
/* rxb > dev->mtu */
- stats->rx_errors++;
- stats->rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
continue;
}
if (rxb < 0) { /* Invalid frame */
rxb = -rxb;
if (rxb & DST_OVR) {
- stats->rx_errors++;
- stats->rx_fifo_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_fifo_errors++;
}
if (rxb & DST_CRC) {
- stats->rx_errors++;
- stats->rx_crc_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_crc_errors++;
}
if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) {
- stats->rx_errors++;
- stats->rx_frame_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_frame_errors++;
}
}
if (skb) {
continue;
}
- stats->rx_bytes += rxb;
+ dev->stats.rx_bytes += rxb;
#ifdef PC300_DEBUG_RX
printk("%s R:", dev->name);
if (d->trace_on) {
cpc_trace(dev, skb, 'R');
}
- stats->rx_packets++;
+ dev->stats.rx_packets++;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
}
pc300_t *card = (pc300_t *)chan->card;
int ch = chan->channel;
volatile pcsca_bd_t __iomem * ptdescr;
- struct net_device_stats *stats = hdlc_stats(dev->dev);
/* Clean up descriptors from previous transmission */
ptdescr = (card->hw.rambase +
TX_BD_ADDR(ch,chan->tx_first_bd));
- while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) !=
- TX_BD_ADDR(ch,chan->tx_first_bd)) &&
- (cpc_readb(&ptdescr->status) & DST_OSB)) {
- stats->tx_packets++;
- stats->tx_bytes += cpc_readw(&ptdescr->len);
+ while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) !=
+ TX_BD_ADDR(ch,chan->tx_first_bd)) &&
+ (cpc_readb(&ptdescr->status) & DST_OSB)) {
+ dev->dev->stats.tx_packets++;
+ dev->dev->stats.tx_bytes += cpc_readw(&ptdescr->len);
cpc_writeb(&ptdescr->status, DST_OSB);
cpc_writew(&ptdescr->len, 0);
chan->nfree_tx_bd++;
}
cpc_net_rx(dev);
/* Discard invalid frames */
- hdlc_stats(dev)->rx_errors++;
- hdlc_stats(dev)->rx_over_errors++;
+ dev->stats.rx_errors++;
+ dev->stats.rx_over_errors++;
chan->rx_first_bd = 0;
chan->rx_last_bd = N_DMA_RX_BUF - 1;
rx_dma_start(card, ch);
card->hw.cpld_reg2) &
~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
}
- hdlc_stats(dev)->tx_errors++;
- hdlc_stats(dev)->tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_fifo_errors++;
sca_tx_intr(d);
}
}
case SIOCGPC300UTILSTATS:
{
if (!arg) { /* clear statistics */
- memset(hdlc_stats(dev), 0, sizeof(struct net_device_stats));
+ memset(&dev->stats, 0, sizeof(dev->stats));
if (card->hw.type == PC300_TE) {
memset(&chan->falc, 0, sizeof(falc_t));
}
pc300stats.hw_type = card->hw.type;
pc300stats.line_on = card->chan[ch].d.line_on;
pc300stats.line_off = card->chan[ch].d.line_off;
- memcpy(&pc300stats.gen_stats, hdlc_stats(dev),
- sizeof(struct net_device_stats));
+ memcpy(&pc300stats.gen_stats, &dev->stats,
+ sizeof(dev->stats));
if (card->hw.type == PC300_TE)
memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t));
if (copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)))
}
}
-static struct net_device_stats *cpc_get_stats(struct net_device *dev)
-{
- return hdlc_stats(dev);
-}
-
static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
{
int br, tc;
dev->stop = cpc_close;
dev->tx_timeout = cpc_tx_timeout;
dev->watchdog_timeo = PC300_TX_TIMEOUT;
- dev->get_stats = cpc_get_stats;
dev->set_multicast_list = NULL;
dev->set_mac_address = NULL;
dev->change_mtu = cpc_change_mtu;
CPC_TTY_DBG("%s: cpc_tty_write data len=%i\n",cpc_tty->name,count);
pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan;
- stats = hdlc_stats(((pc300dev_t*)cpc_tty->pc300dev)->dev);
+ stats = &cpc_tty->pc300dev->dev->stats;
card = (pc300_t *) pc300chan->card;
ch = pc300chan->channel;
pc300_t *card = (pc300_t *)pc300chan->card;
int ch = pc300chan->channel;
volatile pcsca_bd_t __iomem * ptdescr;
- struct net_device_stats *stats = hdlc_stats(pc300dev->dev);
+ struct net_device_stats *stats = &pc300dev->dev->stats;
int rx_len, rx_aux;
volatile unsigned char status;
unsigned short first_bd = pc300chan->rx_first_bd;
pc300ch_t *chan = (pc300ch_t *)dev->chan;
pc300_t *card = (pc300_t *)chan->card;
int ch = chan->channel;
- struct net_device_stats *stats = hdlc_stats(dev->dev);
+ struct net_device_stats *stats = &dev->dev->stats;
unsigned long flags;
volatile pcsca_bd_t __iomem *ptdescr;
int i, nchar;
static inline void wanxl_tx_intr(port_t *port)
{
struct net_device *dev = port->dev;
- struct net_device_stats *stats = hdlc_stats(dev);
while (1) {
desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
struct sk_buff *skb = port->tx_skbs[port->tx_in];
return;
case PACKET_UNDERRUN:
- stats->tx_errors++;
- stats->tx_fifo_errors++;
+ dev->stats.tx_errors++;
+ dev->stats.tx_fifo_errors++;
break;
default:
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
}
desc->stat = PACKET_EMPTY; /* Free descriptor */
pci_unmap_single(port->card->pdev, desc->address, skb->len,
port_t *port = &card->ports[desc->stat &
PACKET_PORT_MASK];
struct net_device *dev = port->dev;
- struct net_device_stats *stats = hdlc_stats(dev);
if (!skb)
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
else {
pci_unmap_single(card->pdev, desc->address,
BUFFER_LENGTH,
skb->len);
debug_frame(skb);
#endif
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
dev->last_rx = jiffies;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
- struct net_device_stats *stats = hdlc_stats(dev);
port_t *port = dev_to_port(dev);
- stats->rx_over_errors = get_status(port)->rx_overruns;
- stats->rx_frame_errors = get_status(port)->rx_frame_errors;
- stats->rx_errors = stats->rx_over_errors + stats->rx_frame_errors;
- return stats;
+ dev->stats.rx_over_errors = get_status(port)->rx_overruns;
+ dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
+ dev->stats.rx_errors = dev->stats.rx_over_errors +
+ dev->stats.rx_frame_errors;
+ return &dev->stats;
}
/* Pointed to by dev->priv */
typedef struct hdlc_device {
- struct net_device_stats stats;
/* used by HDLC layer to take control over HDLC device from hw driver*/
int (*attach)(struct net_device *dev,
unsigned short encoding, unsigned short parity);
/* May be used by hardware driver to gain control over HDLC device */
void detach_hdlc_protocol(struct net_device *dev);
-static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
-{
- return &dev_to_hdlc(dev)->stats;
-}
-
-
static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
struct net_device *dev)
{