return 0;
}
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
- int bdnum_for_frag;
- int frag_num;
- int buf_num;
- int size;
- int i;
+ int size = skb_headlen(skb);
+ int i, bd_num;
- size = skb_headlen(skb);
- buf_num = hns3_tx_bd_count(size);
+ /* if the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+ return skb_shinfo(skb)->nr_frags + 1;
- frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- size = skb_frag_size(frag);
- bdnum_for_frag = hns3_tx_bd_count(size);
- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
- return -ENOMEM;
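+ /* BDs needed for the linear (headlen) part of the skb */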
+ bd_num = hns3_tx_bd_count(size);
- buf_num += bdnum_for_frag;
- }
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int frag_bd_num;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
- return -EBUSY;
- /* manual split the send packet */
- new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb)
+ size = skb_frag_size(frag);
+ frag_bd_num = hns3_tx_bd_count(size);
+
+ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
- dev_kfree_skb_any(skb);
- *out_skb = new_skb;
- }
- if (unlikely(ring_space(ring) < buf_num))
- return -EBUSY;
+ bd_num += frag_bd_num;
+ }
- *bnum = buf_num;
- return 0;
+ return bd_num;
}
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- int buf_num;
+ int bd_num;
- /* No. of segments (plus a header) */
- buf_num = skb_shinfo(skb)->nr_frags + 1;
+ bd_num = hns3_nic_bd_num(skb);
+ if (bd_num < 0)
+ return bd_num;
+
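+ /* over the BD limit for one packet: copy the skb into a new linear skb */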
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
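+ /* BDs needed once the whole skb is copied into one linear buffer */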
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+
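+ /* record that this packet had to be copied before sending */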
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
}
- if (unlikely(ring_space(ring) < buf_num))
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
- *bnum = buf_num;
-
- return 0;
+ return bd_num;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* Prefetch the data used later */
prefetch(skb->data);
- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
- case -EBUSY:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(buf_num <= 0)) {
+ if (buf_num == -EBUSY) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+ goto out_net_tx_busy;
+ } else if (buf_num == -ENOMEM) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ }
- goto out_net_tx_busy;
- case -ENOMEM:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- netdev_err(netdev, "no memory to xmit!\n");
+ if (net_ratelimit())
+ netdev_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
- default:
- break;
}
/* No. of segments (plus a header) */
bool enable;
int ret;
- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
-
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
enable = !!(features & NETIF_F_GRO_HW);
ret = h->ae_algo->ops->set_gro_en(h, enable);
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
netdev->netdev_ops = &hns3_nic_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
hns3_ethtool_set_ops(netdev);
- hns3_nic_set_priv_ops(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);