update source to Ceph Pacific 16.2.2

diff --git a/ceph/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/ceph/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4867a64f377418df3015641aebeea763988a09d5..73e270f30f001a8e77540257f5e6c2c655f9fb27 100644
--- a/ceph/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/ceph/src/spdk/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -361,7 +361,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                 */
                if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
                                m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
@@ -369,20 +369,20 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
                                (ol_flags & PKT_TX_L4_MASK) ==
                                PKT_TX_SCTP_CKSUM) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
@@ -541,10 +541,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                        switch (txm->ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
-                               gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+                               gdesc->txd.msscof = gdesc->txd.hlen +
+                                       offsetof(struct rte_tcp_hdr, cksum);
                                break;
                        case PKT_TX_UDP_CKSUM:
-                               gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+                               gdesc->txd.msscof = gdesc->txd.hlen +
+                                       offsetof(struct rte_udp_hdr,
+                                               dgram_cksum);
                                break;
                        default:
                                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
@@ -667,32 +670,32 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                struct rte_mbuf *rxm)
 {
        uint32_t hlen, slen;
-       struct ipv4_hdr *ipv4_hdr;
-       struct ipv6_hdr *ipv6_hdr;
-       struct tcp_hdr *tcp_hdr;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_ipv6_hdr *ipv6_hdr;
+       struct rte_tcp_hdr *tcp_hdr;
        char *ptr;
 
        RTE_ASSERT(rcd->tcp);
 
        ptr = rte_pktmbuf_mtod(rxm, char *);
        slen = rte_pktmbuf_data_len(rxm);
-       hlen = sizeof(struct ether_hdr);
+       hlen = sizeof(struct rte_ether_hdr);
 
        if (rcd->v4) {
-               if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
-                       return hw->mtu - sizeof(struct ipv4_hdr)
-                                       - sizeof(struct tcp_hdr);
+               if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
+                       return hw->mtu - sizeof(struct rte_ipv4_hdr)
+                                       - sizeof(struct rte_tcp_hdr);
 
-               ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
-               hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
-                               IPV4_IHL_MULTIPLIER;
+               ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
+               hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+                               RTE_IPV4_IHL_MULTIPLIER;
        } else if (rcd->v6) {
-               if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
-                       return hw->mtu - sizeof(struct ipv6_hdr) -
-                                       sizeof(struct tcp_hdr);
+               if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
+                       return hw->mtu - sizeof(struct rte_ipv6_hdr) -
+                                       sizeof(struct rte_tcp_hdr);
 
-               ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
-               hlen += sizeof(struct ipv6_hdr);
+               ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
+               hlen += sizeof(struct rte_ipv6_hdr);
                if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
                        int frag;
 
@@ -701,18 +704,18 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                }
        }
 
-       if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
-               return hw->mtu - hlen - sizeof(struct tcp_hdr) +
-                               sizeof(struct ether_hdr);
+       if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr)))
+               return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) +
+                               sizeof(struct rte_ether_hdr);
 
-       tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+       tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
        hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
        if (rxm->udata64 > 1)
                return (rte_pktmbuf_pkt_len(rxm) - hlen +
                                rxm->udata64 - 1) / rxm->udata64;
        else
-               return hw->mtu - hlen + sizeof(struct ether_hdr);
+               return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
 }
 
 /* Receive side checksum and other offloads */
@@ -947,13 +950,17 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
 
-                       if (rxm->data_len) {
+                       if (likely(start && rxm->data_len > 0)) {
                                start->pkt_len += rxm->data_len;
                                start->nb_segs++;
 
                                rxq->last_seg->next = rxm;
                                rxq->last_seg = rxm;
                        } else {
+                               PMD_RX_LOG(ERR, "Error received empty or out of order frame.");
+                               rxq->stats.drop_total++;
+                               rxq->stats.drop_err++;
+
                                rte_pktmbuf_free_seg(rxm);
                        }
                }
@@ -1308,6 +1315,14 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 
        cmdInfo->setRSSFields = 0;
        port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+       if ((port_rss_conf->rss_hf & VMXNET3_MANDATORY_V4_RSS) !=
+           VMXNET3_MANDATORY_V4_RSS) {
+               PMD_INIT_LOG(WARNING, "RSS: IPv4/6 TCP is required for vmxnet3 v4 RSS,"
+                            "automatically setting it");
+               port_rss_conf->rss_hf |= VMXNET3_MANDATORY_V4_RSS;
+       }
+
        rss_hf = port_rss_conf->rss_hf &
                (VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
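
For context on the errno changes in the first hunks: a DPDK tx_pkt_prepare callback reports a bad packet by setting rte_errno to a positive errno value and returning the index of the offending mbuf, while helpers such as rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return negative errno codes, which is why the patch negates their result. Below is a minimal sketch of that convention, not the driver's code; EXAMPLE_MAX_TXD_PER_PKT and example_prep_pkts() are hypothetical names.

	#include <errno.h>
	#include <rte_errno.h>
	#include <rte_mbuf.h>
	#include <rte_net.h>

	/* Hypothetical per-packet descriptor limit, standing in for
	 * VMXNET3_MAX_TXD_PER_PKT. */
	#define EXAMPLE_MAX_TXD_PER_PKT 16

	static uint16_t
	example_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
	{
		uint16_t i;

		for (i = 0; i < nb_pkts; i++) {
			struct rte_mbuf *m = tx_pkts[i];
			int ret;

			/* rte_errno carries a positive errno value. */
			if (m->nb_segs > EXAMPLE_MAX_TXD_PER_PKT) {
				rte_errno = EINVAL;
				return i;	/* index of the first bad packet */
			}

			/* The helper returns 0 or a negative errno, so negate it. */
			ret = rte_net_intel_cksum_prepare(m);
			if (ret != 0) {
				rte_errno = -ret;
				return i;
			}
		}

		return i;
	}

Similarly, the last hunk ORs the mandatory hash types into rss_hf before programming the device, since the log message states IPv4/IPv6 TCP hashing is required for vmxnet3 v4 RSS. A sketch of that guard against a generic rte_eth_rss_conf; EXAMPLE_MANDATORY_RSS is an assumption standing in for the driver's VMXNET3_MANDATORY_V4_RSS.

	#include <rte_ethdev.h>

	/* Stand-in for VMXNET3_MANDATORY_V4_RSS (IPv4/IPv6 TCP hash types). */
	#define EXAMPLE_MANDATORY_RSS \
		(ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP)

	static void
	example_force_mandatory_rss(struct rte_eth_rss_conf *conf)
	{
		/* Force the hash types the v4 RSS command cannot run without. */
		if ((conf->rss_hf & EXAMPLE_MANDATORY_RSS) != EXAMPLE_MANDATORY_RSS)
			conf->rss_hf |= EXAMPLE_MANDATORY_RSS;
	}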