xen-netback: pass hash value to the frontend
author    Paul Durrant <Paul.Durrant@citrix.com>
          Fri, 13 May 2016 08:37:28 +0000 (09:37 +0100)
committer David S. Miller <davem@davemloft.net>
          Mon, 16 May 2016 17:35:56 +0000 (13:35 -0400)
My recent patch to include/xen/interface/io/netif.h defines a new extra
info type that can be used to pass hash values between backend and guest
frontend.

This patch adds code to xen-netback to pass hash values calculated for
guest receive-side packets (i.e. netback transmit side) to the frontend.
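
For reference, the sketch below shows how a frontend (e.g. xen-netfront) might consume the new XEN_NETIF_EXTRA_TYPE_HASH extra info slot that this patch emits. It is illustrative only: the helper name xennet_set_skb_hash() is hypothetical, the actual frontend support is a separate patch, and the field names follow the netif.h change referenced above.

/* Hypothetical frontend-side handling of the hash extra info.
 * The backend (this patch) stores the raw hash in u.hash.value and a
 * _XEN_NETIF_CTRL_HASH_TYPE_* index in u.hash.type.
 */
#include <linux/skbuff.h>
#include <xen/interface/io/netif.h>

static void xennet_set_skb_hash(struct sk_buff *skb,
				const struct xen_netif_extra_info *extra)
{
	u32 hash = *(const u32 *)extra->u.hash.value;

	if (extra->u.hash.algorithm !=
	    XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ)
		return;

	switch (extra->u.hash.type) {
	case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
	case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
		/* Backend had skb->l4_hash set, so this is an L4 hash */
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
		break;
	case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
	case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
		break;
	default:
		break;
	}
}

Note that when both a GSO extra and a hash extra are present, the backend chains them with XEN_NETIF_EXTRA_FLAG_MORE, so a frontend must walk all extra slots rather than assume a single one.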

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5a39cdbc217c3d163a1aaa7b303e763d9e940212..1c7f49b5acc17805f5983ed41839472fa8c8d5d8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -158,8 +158,17 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
 
-       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-               return fallback(dev, skb) % dev->real_num_tx_queues;
+       if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+               u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+               /* Make sure there is no hash information in the socket
+                * buffer otherwise it would be incorrectly forwarded
+                * to the frontend.
+                */
+               skb_clear_hash(skb);
+
+               return index;
+       }
 
        xenvif_set_skb_hash(vif, skb);
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1916ab332d60571652bd5098b8cf4e433b725098..9d6bd862f3c15e7d53c0009c04405ec853a531e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -168,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
                needed++;
+       if (skb->sw_hash)
+               needed++;
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -285,6 +287,8 @@ struct gop_frag_copy {
        struct xenvif_rx_meta *meta;
        int head;
        int gso_type;
+       int protocol;
+       int hash_present;
 
        struct page *page;
 };
@@ -331,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
        npo->copy_off += *len;
        info->meta->size += *len;
 
+       if (!info->head)
+               return;
+
        /* Leave a gap for the GSO descriptor. */
-       if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+       if ((1 << info->gso_type) & queue->vif->gso_mask)
+               queue->rx.req_cons++;
+
+       /* Leave a gap for the hash extra segment. */
+       if (info->hash_present)
                queue->rx.req_cons++;
 
        info->head = 0; /* There must be something in this buffer now */
@@ -367,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                .npo = npo,
                .head = *head,
                .gso_type = XEN_NETIF_GSO_TYPE_NONE,
+               /* xenvif_set_skb_hash() will have either set a s/w
+                * hash or cleared the hash depending on
+                * whether the frontend wants a hash for this skb.
+                */
+               .hash_present = skb->sw_hash,
        };
        unsigned long bytes;
 
@@ -555,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
 
 static void xenvif_rx_action(struct xenvif_queue *queue)
 {
+       struct xenvif *vif = queue->vif;
        s8 status;
        u16 flags;
        struct xen_netif_rx_response *resp;
@@ -590,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
        gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               struct xen_netif_extra_info *extra = NULL;
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_prefix_mask) {
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&queue->rx,
                                                 queue->rx.rsp_prod_pvt++);
 
@@ -610,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                queue->stats.tx_bytes += skb->len;
                queue->stats.tx_packets++;
 
-               status = xenvif_check_gop(queue->vif,
+               status = xenvif_check_gop(vif,
                                          XENVIF_RX_CB(skb)->meta_slots_used,
                                          &npo);
 
@@ -632,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
                                        flags);
 
                if ((1 << queue->meta[npo.meta_cons].gso_type) &
-                   queue->vif->gso_mask) {
-                       struct xen_netif_extra_info *gso =
-                               (struct xen_netif_extra_info *)
+                   vif->gso_mask) {
+                       extra = (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&queue->rx,
                                                  queue->rx.rsp_prod_pvt++);
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
-                       gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
-                       gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.pad = 0;
-                       gso->u.gso.features = 0;
+                       extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+                       extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+                       extra->u.gso.pad = 0;
+                       extra->u.gso.features = 0;
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+                       extra->flags = 0;
+               }
+
+               if (skb->sw_hash) {
+                       /* Since the skb got here via xenvif_select_queue()
+                        * we know that the hash has been re-calculated
+                        * according to a configuration set by the frontend
+                        * and therefore we know that it is legitimate to
+                        * pass it to the frontend.
+                        */
+                       if (resp->flags & XEN_NETRXF_extra_info)
+                               extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+                       else
+                               resp->flags |= XEN_NETRXF_extra_info;
+
+                       extra = (struct xen_netif_extra_info *)
+                               RING_GET_RESPONSE(&queue->rx,
+                                                 queue->rx.rsp_prod_pvt++);
 
-                       gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
-                       gso->flags = 0;
+                       extra->u.hash.algorithm =
+                               XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+                       if (skb->l4_hash)
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+                       else
+                               extra->u.hash.type =
+                                       skb->protocol == htons(ETH_P_IP) ?
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+                                       _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+                       *(uint32_t *)extra->u.hash.value =
+                               skb_get_hash_raw(skb);
+
+                       extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+                       extra->flags = 0;
                }
 
                xenvif_add_frag_responses(queue, status,