spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
if (lp->sk_count <= 3) {
schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue);
}
- return (0); /* success */
+ return NETDEV_TX_OK; /* success */
} /* net_send_packet */
isdn_net_dev *nd;
isdn_net_local *slp;
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
- int retv = 0;
+ int retv = NETDEV_TX_OK;
if (((isdn_net_local *) netdev_priv(ndev))->master) {
printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
/* For the other encaps the header has already been built */
isdn_net_dev *nd;
unsigned int proto = PPP_IP; /* 0x21 */
struct ippp_struct *ipt,*ipts;
- int slot, retval = 0;
+ int slot, retval = NETDEV_TX_OK;
mlp = (isdn_net_local *) netdev_priv(netdev);
nd = mlp->netdev; /* get master lp */
if (!(ipts->pppcfg & SC_ENABLE_IP)) { /* PPP connected ? */
if (ipts->debug & 0x1)
printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name);
- retval = 1;
+ retval = NETDEV_TX_BUSY;
goto out;
}
lp = isdn_net_get_locked_lp(nd);
if (!lp) {
printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
- retval = 1;
+ retval = NETDEV_TX_BUSY;
goto out;
}
/* we have our lp locked from now on */
if (el_debug > 2)
pr_debug(" queued xmit.\n");
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
/* A receive upset our load, despite our best efforts */
if (el_debug > 2)
lp->tx_full = 1;
spin_unlock_irqrestore (&lp->devlock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
volatile struct lance_regs *ll = lp->ll;
volatile struct lance_init_block *ib = lp->init_block;
int entry, skblen;
- int status = 0;
+ int status = NETDEV_TX_OK;
unsigned long flags;
if (skb_padto(skb, ETH_ZLEN))
- return 0;
+ return NETDEV_TX_OK;
skblen = max_t(unsigned, skb->len, ETH_ZLEN);
local_irq_save(flags);
dev_kfree_skb(skb);
}
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
}
read_unlock(&bond->curr_slave_lock);
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
void bond_alb_monitor(struct work_struct *work)
dev_kfree_skb(skb);
}
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
read_unlock(&bond->curr_slave_lock);
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
/*
dev_kfree_skb(skb);
}
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
/*
/* frame sent to all suitable interfaces */
read_unlock(&bond->lock);
- return 0;
+ return NETDEV_TX_OK;
}
/*------------------------- Device initialization ---------------------------*/
bp->xmt_length_errors++; /* bump error counter */
netif_wake_queue(dev);
dev_kfree_skb(skb);
- return(0); /* return "success" */
+ return NETDEV_TX_OK; /* return "success" */
}
/*
* See if adapter link is available, if not, free buffer
bp->xmt_discards++; /* bump error counter */
dev_kfree_skb(skb); /* free sk_buff now */
netif_wake_queue(dev);
- return(0); /* return "success" */
+ return NETDEV_TX_OK; /* return "success" */
}
}
dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
spin_unlock_irqrestore(&bp->lock, flags);
netif_wake_queue(dev);
- return(0); /* packet queued to adapter */
+ return NETDEV_TX_OK; /* packet queued to adapter */
}
static int load_packet(struct net_device *dev, struct sk_buff *skb)
{
struct depca_private *lp = netdev_priv(dev);
- int i, entry, end, len, status = 0;
+ int i, entry, end, len, status = NETDEV_TX_OK;
entry = lp->tx_new; /* Ring around buffer number. */
end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
++dev->stats.tx_packets;
dev->stats.tx_bytes += len;
- return 0;
+ return NETDEV_TX_OK;
}
/* Tx lock BH */
{
struct ifb_private *dp = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- int ret = 0;
+ int ret = NETDEV_TX_OK;
u32 from = G_TC_FROM(skb->tc_verd);
stats->rx_packets++;
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
- IRDA_ASSERT(dev != NULL, return 0;);
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
iobase = self->io.sir_base;
self = netdev_priv(dev);
- IRDA_ASSERT (self != NULL, return 0; );
+ IRDA_ASSERT (self != NULL, return NETDEV_TX_OK; );
IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __func__
,skb->len,self->txpending,INB (OBOE_ENABLEH));
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
iobase = self->io.fir_base;
int err;
s32 speed;
- IRDA_ASSERT(dev != NULL, return 0;);
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
netif_stop_queue(ndev);
IRDA_DEBUG(1, "%s\n", __func__);
- IRDA_ASSERT(dev != NULL, return 0;);
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
netif_stop_queue(dev);
smsc_ircc_change_speed(self, speed);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
self->new_speed = speed;
}
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
/*
s32 speed;
int mtt;
- IRDA_ASSERT(dev != NULL, return 0;);
+ IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
netif_stop_queue(dev);
smsc_ircc_change_speed(self, speed);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
self->new_speed = speed;
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
/*
__u32 speed;
self = netdev_priv(dev);
- IRDA_ASSERT(self != NULL, return 0;);
+ IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
iobase = self->io.fir_base;
netif_stop_queue(dev);
dev_kfree_skb (skb);
dev->stats.tx_bytes += send_length;
- return 0;
+ return NETDEV_TX_OK;
}
/**
/* Poll CQ here */
mlx4_en_xmit_poll(priv, tx_ind);
- return 0;
+ return NETDEV_TX_OK;
tx_drop:
dev_kfree_skb_any(skb);
}
}
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
drop:
ss = &mgp->ss[skb_get_queue_mapping(skb)];
dev_kfree_skb_any(skb);
ss->stats.tx_dropped += 1;
- return 0;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
if (skb->len > XMIT_BUFF_SIZE) {
printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
- return 0;
+ return NETDEV_TX_OK;
}
netif_stop_queue(dev);
}
dev_kfree_skb(skb);
#endif
- return 0;
+ return NETDEV_TX_OK;
}
/*******************************************
// dequeue packets from xmt queue and send them
netif_start_queue(dev);
dev_kfree_skb(skb);
- return (0); /* return "success" */
+ return NETDEV_TX_OK; /* return "success" */
}
if (bp->QueueSkb == 0) { // return with tbusy set: queue full
length = skb->len;
if (length < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
- return 0;
+ return NETDEV_TX_OK;
length = ETH_ZLEN;
}
dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
/*
if(tp->QueueSkb > 0)
netif_wake_queue(dev);
- return (0);
+ return NETDEV_TX_OK;
}
static int smctr_send_lobe_media_test(struct net_device *dev)
tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
spin_unlock_irqrestore(&tp->lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
/*
if (skb->len > MAX_PACKET_SIZE) {
printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
spin_lock_irqsave(&db->lock, flags);
/* free this SKB */
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
ret = 0;
if (!skb || !dev)
- return(0);
+ return NETDEV_TX_OK;
dlp = netdev_priv(dev);
/* Alan Cox recommends always returning 0, and always freeing the packet */
/* experience suggest a slightly more conservative approach */
- if (!ret)
+ if (ret == NETDEV_TX_OK)
{
dev_kfree_skb(skb);
netif_wake_queue(dev);
if (dscc4_tx_quiescent(dpriv, dev))
dscc4_do_tx(dpriv, dev);
- return 0;
+ return NETDEV_TX_OK;
}
static int dscc4_close(struct net_device *dev)
dbg(DBG_ASS,
"Tried to transmit but no carrier on card %d port %d\n",
card->card_no, port->index);
- return 0;
+ return NETDEV_TX_OK;
}
/* Drop it if it's too big! MTU failure ? */
LEN_TX_BUFFER);
dev_kfree_skb(skb);
dev->stats.tx_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
/*
dev->stats.tx_errors++;
dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
card->card_no, port->index);
- return 0;
+ return NETDEV_TX_OK;
}
/*
fst_q_work_item(&fst_work_txq, card->card_no);
tasklet_schedule(&fst_tx_task);
- return 0;
+ return NETDEV_TX_OK;
}
/*
lmc_softc_t *sc = dev_to_sc(dev);
u32 flag;
int entry;
- int ret = 0;
+ int ret = NETDEV_TX_OK;
unsigned long flags;
lmc_trace(dev, "lmc_start_xmit in");
}
spin_unlock(&port->lock);
- return 0;
+ return NETDEV_TX_OK;
}
printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
"(len=%d)\n", dev->name, skb->len);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
if (local->ddev != dev) {
printk(KERN_DEBUG "%s: prism2_tx: trying to use "
"AP device with Ethernet net dev\n", dev->name);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
} else {
if (local->iw_mode == IW_MODE_REPEAT) {
printk(KERN_DEBUG "%s: prism2_tx: trying to use "
"non-WDS link in Repeater mode\n", dev->name);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
} else if (local->iw_mode == IW_MODE_INFRA &&
(local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
memcmp(skb->data + ETH_ALEN, dev->dev_addr,
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb == NULL) {
iface->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
if (pskb_expand_head(skb, need_headroom, need_tailroom,
GFP_ATOMIC)) {
kfree_skb(skb);
iface->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
} else if (skb_headroom(skb) < need_headroom) {
struct sk_buff *tmp = skb;
kfree_skb(tmp);
if (skb == NULL) {
iface->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb == NULL) {
iface->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
}
/* Send IEEE 802.11 encapsulated frame using the master radio device */
skb->dev = local->dev;
dev_queue_xmit(skb);
- return 0;
+ return NETDEV_TX_OK;
}
printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
"(len=%d)\n", dev->name, skb->len);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
iface->stats.tx_packets++;
/* Send IEEE 802.11 encapsulated frame using the master radio device */
skb->dev = local->dev;
dev_queue_xmit(skb);
- return 0;
+ return NETDEV_TX_OK;
}
printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
"expected 0x%08x)\n",
dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
- ret = 0;
+ ret = NETDEV_TX_OK;
iface->stats.tx_dropped++;
goto fail;
}
if (skb->len < 24) {
printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
"(len=%d)\n", dev->name, skb->len);
- ret = 0;
+ ret = NETDEV_TX_OK;
iface->stats.tx_dropped++;
goto fail;
}
dev->name, meta->ethertype);
hostap_dump_tx_80211(dev->name, skb);
- ret = 0; /* drop packet */
+ ret = NETDEV_TX_OK; /* drop packet */
iface->stats.tx_dropped++;
goto fail;
}
break;
case AP_TX_DROP:
- ret = 0; /* drop packet */
+ ret = NETDEV_TX_OK; /* drop packet */
iface->stats.tx_dropped++;
goto fail;
case AP_TX_RETRY:
case AP_TX_BUFFERED:
/* do not free skb here, it will be freed when the
* buffered frame is sent/timed out */
- ret = 0;
+ ret = NETDEV_TX_OK;
goto tx_exit;
}
"frame (drop_unencrypted=1)\n", dev->name);
}
iface->stats.tx_dropped++;
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
if (skb == NULL) {
printk(KERN_DEBUG "%s: TX - encryption failed\n",
dev->name);
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
meta = (struct hostap_skb_tx_data *) skb->cb;
"expected 0x%08x) after hostap_tx_encrypt\n",
dev->name, meta->magic,
HOSTAP_SKB_TX_DATA_MAGIC);
- ret = 0;
+ ret = NETDEV_TX_OK;
iface->stats.tx_dropped++;
goto fail;
}
}
if (local->func->tx == NULL || local->func->tx(skb, dev)) {
- ret = 0;
+ ret = NETDEV_TX_OK;
iface->stats.tx_dropped++;
} else {
- ret = 0;
+ ret = NETDEV_TX_OK;
iface->stats.tx_packets++;
iface->stats.tx_bytes += skb->len;
}
fail:
- if (!ret && skb)
+ if (ret == NETDEV_TX_OK && skb)
dev_kfree_skb(skb);
tx_exit:
if (tx.sta_ptr)
if (ret == 0) {
dev->stats.tx_packets++;
dev->stats.tx_bytes += txb->payload_size;
- return 0;
+ return NETDEV_TX_OK;
}
ieee80211_txb_free(txb);
}
- return 0;
+ return NETDEV_TX_OK;
failed:
spin_unlock_irqrestore(&ieee->lock, flags);
/* unlock the driver code */
spin_unlock_irqrestore(&priv->slock, flags);
- return 0;
+ return NETDEV_TX_OK;
drop_free:
ndev->stats.tx_dropped++;
#ifdef DEBUG_TX_TRACE
printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
#endif
- return 0;
+ return NETDEV_TX_OK;
}
/*********************** HARDWARE CONFIGURATION ***********************/
* able to detect collisions, therefore in theory we don't really
* need to pad. Jean II */
if (skb_padto(skb, ETH_ZLEN))
- return 0;
+ return NETDEV_TX_OK;
wv_packet_write(dev, skb->data, skb->len);
#ifdef DEBUG_TX_TRACE
printk(KERN_DEBUG "%s: <-wavelan_packet_xmit()\n", dev->name);
#endif
- return(0);
+ return NETDEV_TX_OK;
}
/********************** HARDWARE CONFIGURATION **********************/
if (rc) {
++dev->stats.tx_dropped;
netif_stop_queue(dev);
- rc = NETDEV_TX_OK;
} else {
++dev->stats.tx_packets;
dev->stats.tx_bytes += skb->len;
netif_stop_queue(dev);
}
spin_unlock_irqrestore(&this->lock, flags);
- return rc;
+ return NETDEV_TX_OK;
}
static int wl3501_open(struct net_device *dev)
CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
if (rc)
rc = NETDEV_TX_BUSY;
+ else
+ rc = NETDEV_TX_OK;
return rc;
} /* end of claw_tx */
struct net_device *dev)
{
struct lcs_header *header;
- int rc = 0;
+ int rc = NETDEV_TX_OK;
LCS_DBF_TEXT(5, trace, "hardxmit");
if (skb == NULL) {
card->stats.tx_dropped++;
card->stats.tx_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
if (card->state != DEV_STATE_UP) {
dev_kfree_skb(skb);
card->stats.tx_dropped++;
card->stats.tx_errors++;
card->stats.tx_carrier_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
if (skb->protocol == htons(ETH_P_IPV6)) {
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
netif_stop_queue(card->dev);
spin_lock(&card->lock);
if (skb == NULL) {
IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
privptr->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
if (skb_headroom(skb) < NETIUCV_HDRLEN) {
IUCV_DBF_TEXT(data, 2,
"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
dev_kfree_skb(skb);
privptr->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
/**
privptr->stats.tx_dropped++;
privptr->stats.tx_errors++;
privptr->stats.tx_carrier_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
if (netiucv_test_and_set_busy(dev)) {
card->stats.tx_bytes += tx_bytes;
if (new_skb != skb)
dev_kfree_skb_any(skb);
+ rc = NETDEV_TX_OK;
} else {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
}
+ rc = NETDEV_TX_OK;
} else {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
{
struct net_device *net_dev = skb->dev;
PRTMP_ADAPTER pAd = net_dev->ml_priv;
- int status = 0;
+ int status = NETDEV_TX_OK;
PNDIS_PACKET pPacket = (PNDIS_PACKET) skb;
{
STASendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET) &pPacket, 1);
- status = 0;
+ status = NETDEV_TX_OK;
done:
return status;
if (!(net_dev->flags & IFF_UP))
{
RELEASE_NDIS_PACKET(pAd, (PNDIS_PACKET)skb_p, NDIS_STATUS_FAILURE);
- return 0;
+ return NETDEV_TX_OK;
}
NdisZeroMemory((PUCHAR)&skb_p->cb[CB_OFF], 15);
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += txb->payload_size;
- return 0;
+ return NETDEV_TX_OK;
}
ieee80211_txb_free(txb);
}
}
- return 0;
+ return NETDEV_TX_OK;
failed:
spin_unlock_irqrestore(&ieee->lock, flags);
spin_unlock_irqrestore(&priv->tx_lock,flags);
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
rtl8180_tx(dev, skb->data, skb->len, priority,
spin_unlock_irqrestore(&priv->tx_lock,flags);
dev_kfree_skb_any(skb);
- return 0;
+ return NETDEV_TX_OK;
}
// longpre 144+48 shortpre 72+24
p80211_metawep_t p80211_wep;
if (skb == NULL)
- return 0;
+ return NETDEV_TX_OK;
if (wlandev->state != WLAN_DEVICE_OPEN) {
result = 1;
while (i < mpc->number_of_mps_macs) {
if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
- return 0; /* success! */
+ return NETDEV_TX_OK; /* success! */
i++;
}
skb_dst_drop(skb);
rc = ops->ndo_start_xmit(skb, dev);
- if (rc == 0)
+ if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
/*
* TODO: if skb_orphan() was called by
skb->next = nskb->next;
nskb->next = NULL;
rc = ops->ndo_start_xmit(nskb, dev);
- if (unlikely(rc)) {
+ if (unlikely(rc != NETDEV_TX_OK)) {
nskb->next = skb->next;
skb->next = nskb;
return rc;
out_kfree_skb:
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static u32 skb_tx_hashrnd;
u32 sta_flags = 0;
if (unlikely(skb->len < ETH_HLEN)) {
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
/* Do not send frames with mesh_ttl == 0 */
sdata->u.mesh.mshstats.dropped_frames_ttl++;
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
memset(&mesh_hdr, 0, sizeof(mesh_hdr));
hdrlen = 24;
break;
default:
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
dev->trans_start = jiffies;
dev_queue_xmit(skb);
- return 0;
+ return NETDEV_TX_OK;
fail:
- if (!ret)
+ if (ret == NETDEV_TX_OK)
dev_kfree_skb(skb);
return ret;
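
Every hunk above converts a hard_start_xmit/ndo_start_xmit path to the same
convention: return NETDEV_TX_OK whenever the driver has consumed the skb
(queued it to hardware, or dropped and freed it), and NETDEV_TX_BUSY only when
the frame could not be accepted and the core should requeue it.  A minimal
sketch of that pattern follows; the foo_* names are illustrative placeholders,
not symbols from any driver touched here.

/*
 * Illustrative only: skeleton of the ndo_start_xmit return convention.
 * struct foo_priv and foo_hw_queue() are made-up placeholders.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

struct foo_priv {
	unsigned int tx_free;		/* free TX descriptors */
};

/* Device-specific submit; the skb is freed on the TX-complete interrupt. */
static void foo_hw_queue(struct foo_priv *priv, struct sk_buff *skb);

static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned int len;

	if (!priv->tx_free) {
		/* No descriptor: stop the queue and let the core requeue
		 * the skb.  The skb must not be freed on this path. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb_padto() already freed the skb */

	len = skb->len;
	priv->tx_free--;
	foo_hw_queue(priv, skb);	/* hardware now owns the skb */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;		/* consumed; drop paths also return OK */
}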