#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
+/* List of required flags advertised by the hardware that will be
+ * used if TSO is enabled. */
+#define DPDK_TX_TSO_OFFLOAD_FLAGS (DEV_TX_OFFLOAD_TCP_TSO \
+ | DEV_TX_OFFLOAD_TCP_CKSUM \
+ | DEV_TX_OFFLOAD_UDP_CKSUM \
+ | DEV_TX_OFFLOAD_IPV4_CKSUM)
+
+
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
}
if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
- conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
- conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+ conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS;
}
/* Limit configured rss hash functions to only those supported
struct rte_ether_addr eth_addr;
int diag;
int n_rxq, n_txq;
+ uint32_t tx_tso_offload_capa = DPDK_TX_TSO_OFFLOAD_FLAGS;
uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_IPV4_CKSUM;
- uint32_t tx_tso_offload_capa = DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_IPV4_CKSUM;
rte_eth_dev_info_get(dev->port_id, &info);
if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
+ netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM;
netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
}
if (userspace_tso_enabled()) {
netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
+ netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM;
netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN
| 1ULL << VIRTIO_NET_F_HOST_UFO;
/* Per-netdev hardware offload capability bits, stored in
 * netdev->ol_flags.  UDP checksum offload sits between TCP checksum
 * and TSO so the three checksum bits stay contiguous. */
enum netdev_ol_flags {
    NETDEV_TX_OFFLOAD_IPV4_CKSUM = 1 << 0,
    NETDEV_TX_OFFLOAD_TCP_CKSUM = 1 << 1,
    NETDEV_TX_OFFLOAD_UDP_CKSUM = 1 << 2,
    NETDEV_TX_OFFLOAD_TCP_TSO = 1 << 3,
};
/* A network device (e.g. an Ethernet device).
netdev_send_prepare_packet(const uint64_t netdev_flags,
struct dp_packet *packet, char **errormsg)
{
+ uint64_t l4_mask;
+
if (dp_packet_hwol_is_tso(packet)
&& !(netdev_flags & NETDEV_TX_OFFLOAD_TCP_TSO)) {
/* Fall back to GSO in software. */
return false;
}
- if (dp_packet_hwol_l4_mask(packet)
- && !(netdev_flags & NETDEV_TX_OFFLOAD_TCP_CKSUM)) {
- /* Fall back to L4 csum in software. */
- VLOG_ERR_BUF(errormsg, "No L4 checksum support");
+ l4_mask = dp_packet_hwol_l4_mask(packet);
+ if (l4_mask) {
+ if (dp_packet_hwol_l4_is_tcp(packet)) {
+ if (!(netdev_flags & NETDEV_TX_OFFLOAD_TCP_CKSUM)) {
+ /* Fall back to TCP csum in software. */
+ VLOG_ERR_BUF(errormsg, "No TCP checksum support");
+ return false;
+ }
+ } else if (dp_packet_hwol_l4_is_udp(packet)) {
+ if (!(netdev_flags & NETDEV_TX_OFFLOAD_UDP_CKSUM)) {
+ /* Fall back to UDP csum in software. */
+ VLOG_ERR_BUF(errormsg, "No UDP checksum support");
+ return false;
+ }
+ } else {
+ VLOG_ERR_BUF(errormsg, "No L4 checksum support: mask: %"PRIu64,
+ l4_mask);
return false;
+ }
}
return true;