ETHTOOL_XDP_REDIRECT,
ETHTOOL_XDP_PASS,
ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_XMIT,
ETHTOOL_XDP_TX,
ETHTOOL_MAX_STATS,
};
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
- { ETHTOOL_XDP_REDIRECT, T_SW, "xdp_redirect", },
- { ETHTOOL_XDP_PASS, T_SW, "xdp_pass", },
- { ETHTOOL_XDP_DROP, T_SW, "xdp_drop", },
- { ETHTOOL_XDP_TX, T_SW, "xdp_tx", },
+ { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
};
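
The enum above and this name table must stay in step: each entry pairs an ETHTOOL_* id with the string ethtool reports, and the update path further down switches on the id to fill in the matching value. For orientation, a sketch of the two callbacks that consume the table; the function and array names (mvneta_ethtool_get_sset_count, mvneta_ethtool_get_strings, mvneta_statistics) are taken from the driver rather than from these hunks, so treat this as a fragment, not part of the patch.

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);

	return -EOPNOTSUPP;
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		/* Emit the strings in table order; ethtool pairs them
		 * positionally with the values from get_ethtool_stats.
		 */
		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}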
struct mvneta_stats {
u64 xdp_redirect;
u64 xdp_pass;
u64 xdp_drop;
+ u64 xdp_xmit;
u64 xdp_tx;
};
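
For orientation, the nesting that makes expressions like stats->es.ps.xdp_tx in the hunks below resolve. The layout is reproduced from the driver around this series and should be read as a sketch, not as part of the patch:

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;		/* the per-path counters above */
	u64 skb_alloc_error;
	u64 refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;	/* guards the u64 counters */
	struct mvneta_ethtool_stats es;
	u64 rx_dropped;
	u64 rx_errors;
};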
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
struct xdp_frame *xdpf, bool dma_map)
{
- struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_desc *tx_desc;
struct mvneta_tx_buf *buf;
dma_addr_t dma_addr;
tx_desc->buf_phys_addr = dma_addr;
tx_desc->data_size = xdpf->len;
- u64_stats_update_begin(&stats->syncp);
- stats->es.ps.tx_bytes += xdpf->len;
- stats->es.ps.tx_packets++;
- stats->es.ps.xdp_tx++;
- u64_stats_update_end(&stats->syncp);
-
mvneta_txq_inc_put(txq);
txq->pending++;
txq->count++;
__netif_tx_lock(nq, cpu);
ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
- if (ret == MVNETA_XDP_TX)
+ if (ret == MVNETA_XDP_TX) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_packets++;
+ stats->es.ps.xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
mvneta_txq_pend_desc_add(pp, txq, 0);
+ }
__netif_tx_unlock(nq);
return ret;
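
Two call sites share mvneta_xdp_submit_frame(): the XDP_TX path above and the ndo_xdp_xmit path below. Hoisting the counter updates out of the helper lets each caller account on its own counter (xdp_tx here, xdp_xmit below) and batch one u64_stats critical section per submission instead of one per frame. The writer side of the per-CPU pattern, as a self-contained sketch with hypothetical names (demo_pcpu_stats, demo_count):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block mirroring mvneta_pcpu_stats. */
struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* Writer: runs on the local CPU (softirq context in the driver), so
 * this_cpu_ptr() is safe and syncp only orders against readers.
 */
static void demo_count(struct demo_pcpu_stats __percpu *pcpu, u32 len)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}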
mvneta_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ int i, nxmit_byte = 0, nxmit = num_frame;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
- int i, drops = 0;
u32 ret;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
- if (ret != MVNETA_XDP_TX) {
+ if (ret == MVNETA_XDP_TX) {
+ nxmit_byte += frames[i]->len;
+ } else {
xdp_return_frame_rx_napi(frames[i]);
- drops++;
+ nxmit--;
}
}
mvneta_txq_pend_desc_add(pp, txq, 0);
__netif_tx_unlock(nq);
- return num_frame - drops;
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets += nxmit;
+ stats->es.ps.xdp_xmit += nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
}
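
Return-value note: with the rework the routine returns nxmit, the count of frames actually queued, per the ndo_xdp_xmit convention of this era, and frames that could not be queued are freed on the spot with xdp_return_frame_rx_napi(). The hook itself is registered in the driver's net_device_ops; a fragment for orientation, with the unrelated callbacks elided:

static const struct net_device_ops mvneta_netdev_ops = {
	/* ... other callbacks elided ... */
	.ndo_xdp_xmit = mvneta_xdp_xmit,
};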
u64 xdp_redirect;
u64 xdp_pass;
u64 xdp_drop;
+ u64 xdp_xmit;
u64 xdp_tx;
stats = per_cpu_ptr(pp->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
xdp_redirect = stats->es.ps.xdp_redirect;
xdp_pass = stats->es.ps.xdp_pass;
xdp_drop = stats->es.ps.xdp_drop;
+ xdp_xmit = stats->es.ps.xdp_xmit;
xdp_tx = stats->es.ps.xdp_tx;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
es->ps.xdp_redirect += xdp_redirect;
es->ps.xdp_pass += xdp_pass;
es->ps.xdp_drop += xdp_drop;
+ es->ps.xdp_xmit += xdp_xmit;
es->ps.xdp_tx += xdp_tx;
}
}
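
For completeness, the reader side of the same pattern as a self-contained sketch, reusing the hypothetical demo_pcpu_stats from the writer example above: copy each CPU's counters inside the begin/retry window, accumulate outside it. (The _irq fetch variants used here match kernels of this era; they were later folded into plain u64_stats_fetch_begin/retry.)

#include <linux/cpumask.h>

static u64 demo_total_packets(struct demo_pcpu_stats __percpu *pcpu)
{
	unsigned int start;
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(pcpu, cpu);
		u64 packets;

		/* Re-read if a writer updated the block mid-copy. */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			packets = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += packets;
	}

	return total;
}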
case ETHTOOL_XDP_TX:
pp->ethtool_stats[i] = stats.ps.xdp_tx;
break;
+ case ETHTOOL_XDP_XMIT:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit;
+ break;
}
break;
}
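
With the nested switch filled in, the renamed strings are exactly what `ethtool -S <dev>` prints: the rx_/tx_ prefixes make the direction of each XDP counter explicit, and the new tx_xdp_xmit line distinguishes frames redirected out through ndo_xdp_xmit from the local rx_xdp_tx path.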