static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
/* call igb_desc_unused which always leaves
 * at least 1 descriptor unused to make sure
 * next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
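/* For reference, a sketch of the free-slot computation behind
 * igb_desc_unused(), assumed from the ring indexing convention noted
 * above (one slot is always left empty so next_to_use only meets
 * next_to_clean when the ring is empty); illustrative, not part of
 * this patch: */
static inline int igb_desc_unused_sketch(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;
	/* indices have wrapped: count the free slots across the wrap point */
	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}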
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
- .ndo_start_xmit = igb_xmit_frame_adv,
+ .ndo_start_xmit = igb_xmit_frame,
.ndo_get_stats64 = igb_get_stats64,
.ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
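/* Note: .ndo_start_xmit is what the core calls for every outgoing skb;
 * it must return NETDEV_TX_OK once the skb is consumed (even on error,
 * after freeing it) or NETDEV_TX_BUSY to have it requeued, which is why
 * the helpers below return netdev_tx_t rather than a plain int. */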
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
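/* tx_flags carries per-frame offload state: the low bits hold the
 * IGB_TX_FLAGS_* booleans and the upper 16 bits (see the mask/shift
 * above) hold the 802.1Q VLAN tag taken from the skb. */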
-static inline int igb_tso_adv(struct igb_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static inline int igb_tso(struct igb_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
struct e1000_adv_tx_context_desc *context_desc;
unsigned int i;
return true;
}
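/* A sketch of the context-descriptor setup igb_tso() performs for a
 * GSO skb, assuming the usual advanced descriptor field layout
 * (illustrative, not the exact elided body):
 *
 *	mss_l4len_idx  = skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 *	mss_l4len_idx |= tcp_hdrlen(skb) << E1000_ADVTXD_L4LEN_SHIFT;
 *	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 *
 * *hdr_len is set to the total header length so igb_tx_queue() can
 * report the payload length (skb->len - hdr_len) to the hardware. */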
-static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum(struct igb_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
{
struct e1000_adv_tx_context_desc *context_desc;
struct device *dev = tx_ring->dev;
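/* For CHECKSUM_PARTIAL skbs, igb_tx_csum() writes a context descriptor
 * telling the hardware which L3/L4 protocol to checksum and returns
 * true when one was consumed, which is how the caller below decides to
 * set IGB_TX_FLAGS_CSUM. */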
#define IGB_MAX_TXD_PWR 16
#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
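/* i.e. one advanced data descriptor can address at most 1 << 16 =
 * 65536 bytes; larger buffers must be spread over multiple
 * descriptors. */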
-static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
- unsigned int first)
+static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
+ unsigned int first)
{
struct igb_buffer *buffer_info;
struct device *dev = tx_ring->dev;
return 0;
}
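/* igb_tx_map() dma-maps skb->data and each page fragment into
 * tx_ring->buffer_info[] starting at slot 'first' and returns the
 * number of descriptors used; 0 signals a DMA mapping failure, which
 * the caller handles by rewinding the ring (see below). */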
-static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
- u32 tx_flags, int count, u32 paylen,
- u8 hdr_len)
+static inline void igb_tx_queue(struct igb_ring *tx_ring,
+ u32 tx_flags, int count, u32 paylen,
+ u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc;
struct igb_buffer *buffer_info;
return __igb_maybe_stop_tx(tx_ring, size);
}
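/* Sketch of the fast-path wrapper assumed around __igb_maybe_stop_tx(),
 * inferred from its use in igb_xmit_frame_ring() below (illustrative): */
static inline int igb_maybe_stop_tx_sketch(struct igb_ring *tx_ring, int size)
{
	/* common case: plenty of room, skip the stop/wake slow path */
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}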
-netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
- struct igb_ring *tx_ring)
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
+ struct igb_ring *tx_ring)
{
int tso = 0, count;
u32 tx_flags = 0;
first = tx_ring->next_to_use;
if (skb_is_gso(skb)) {
- tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+ tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
}
if (tso)
tx_flags |= IGB_TX_FLAGS_TSO;
- else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
+ else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IGB_TX_FLAGS_CSUM;
/* count reflects descriptors mapped, if 0 or less then a mapping error
 * has occurred and we need to rewind the descriptor queue
 */
- count = igb_tx_map_adv(tx_ring, skb, first);
+ count = igb_tx_map(tx_ring, skb, first);
if (!count) {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
tx_ring->next_to_use = first;
return NETDEV_TX_OK;
}
- igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+ igb_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
return NETDEV_TX_OK;
}
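/* The Tx fast path above, in order: igb_tso()/igb_tx_csum() emit at
 * most one context descriptor, igb_tx_map() dma-maps the buffers,
 * igb_tx_queue() writes the data descriptors and bumps the tail
 * register, and igb_maybe_stop_tx() stops the queue early when a
 * worst-case frame (MAX_SKB_FRAGS + 4 descriptors) might not fit. */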
-static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *tx_ring;
/* This goes back to the question of how to logically map a tx queue
 * to a flow. Right now, performance is impacted slightly negatively
 * if using multiple tx queues. If the stack breaks away from a
 * single qdisc implementation, we can look at this again. */
- return igb_xmit_frame_ring_adv(skb, tx_ring);
+ return igb_xmit_frame_ring(skb, tx_ring);
}
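/* The elided body picks tx_ring from skb->queue_mapping, i.e. the
 * queue the stack selected for this flow; the exact lookup is not
 * shown in this hunk. */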
/**
clean_complete = !!igb_clean_tx_irq(q_vector);
if (q_vector->rx_ring)
- clean_complete &= igb_clean_rx_irq_adv(q_vector, budget);
+ clean_complete &= igb_clean_rx_irq(q_vector, budget);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return count < tx_ring->count;
}
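/* NAPI contract: while either the Tx or Rx cleanup reports leftover
 * work, igb_poll() returns the full budget so the core keeps polling;
 * only when clean_complete is true does it call napi_complete() and
 * re-enable the interrupt. */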
-static inline void igb_rx_checksum_adv(struct igb_ring *ring,
- u32 status_err, struct sk_buff *skb)
+static inline void igb_rx_checksum(struct igb_ring *ring,
+ u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
return hlen;
}
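/* A sketch of the decision igb_rx_checksum() makes, assuming the usual
 * status bit layout (illustrative, not the exact elided body):
 *
 *	if (status_err & E1000_RXDEXT_STATERR_TCPE)
 *		return;		// L4 error: leave skb as CHECKSUM_NONE
 *	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;	// hw verified
 */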
-static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, int budget)
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
struct igb_ring *rx_ring = q_vector->rx_ring;
union e1000_adv_rx_desc *rx_desc;
total_bytes += skb->len;
total_packets++;
- igb_rx_checksum_adv(rx_ring, staterr, skb);
+ igb_rx_checksum(rx_ring, staterr, skb);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
- igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_ring->total_bytes += total_bytes;
if (cleaned_count)
- igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
return !!budget;
}
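/* Refilling in batches of IGB_RX_BUFFER_WRITE amortizes the MMIO write
 * to the ring tail register, which costs far more than the per-buffer
 * allocation itself; whatever remains is returned once the clean loop
 * exits. */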
}
/**
- * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: pointer to the rx descriptor ring to refill
 * @cleaned_count: number of buffers to replace
**/
-void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, u16 cleaned_count)
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
union e1000_adv_rx_desc *rx_desc;
struct igb_buffer *bi;
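/* The elided body walks the ring from next_to_use, attaching a fresh
 * buffer and its dma mapping to each igb_buffer and writing the mapped
 * addresses into the advanced rx descriptors, then (after a write
 * barrier) updates the tail register so the hardware sees the new
 * buffers. */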