net: Don't keep around original SKB when we software segment GSO frames.

Just maintain the segment list properly by returning the head of the remaining SKB list from dev_hard_start_xmit().
diff --git a/net/core/dev.c b/net/core/dev.c
index 0fde7d2153db45b5cc4f5336af59657118c82ed0..c89da4f306b12dfd99b12ca1a27063b6dbf07900 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2485,52 +2485,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
        return 0;
 }
 
-struct dev_gso_cb {
-       void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
-       struct dev_gso_cb *cb;
-
-       kfree_skb_list(skb->next);
-       skb->next = NULL;
-
-       cb = DEV_GSO_CB(skb);
-       if (cb->destructor)
-               cb->destructor(skb);
-}
-
-/**
- *     dev_gso_segment - Perform emulated hardware segmentation on skb.
- *     @skb: buffer to segment
- *     @features: device features as applicable to this skb
- *
- *     This function segments the given skb and stores the list of segments
- *     in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
-       struct sk_buff *segs;
-
-       segs = skb_gso_segment(skb, features);
-
-       /* Verifying header integrity only. */
-       if (!segs)
-               return 0;
-
-       if (IS_ERR(segs))
-               return PTR_ERR(segs);
-
-       skb->next = segs;
-       DEV_GSO_CB(skb)->destructor = skb->destructor;
-       skb->destructor = dev_gso_skb_destructor;
-
-       return 0;
-}
-
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
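
The block deleted above was the "emulated GSO" bookkeeping: the original oversized skb stayed alive as the head of the segment list, with its destructor hijacked via skb->cb so that freeing the head also freed any unsent segments. The rest of this patch makes that unnecessary by freeing the original as soon as it has been segmented. For context, a minimal sketch of consuming skb_gso_segment() output under the new model; transmit_one() is a hypothetical stand-in for the driver hand-off:

        static int xmit_segments(struct sk_buff *skb, netdev_features_t features)
        {
                /* skb_gso_segment() returns NULL, an ERR_PTR, or a singly
                 * linked list of sub-MTU frames chained through skb->next.
                 */
                struct sk_buff *segs = skb_gso_segment(skb, features);

                kfree_skb(skb);                 /* original is no longer needed */
                if (IS_ERR_OR_NULL(segs))
                        return -EIO;            /* illustrative error handling */

                while (segs) {
                        struct sk_buff *next = segs->next;

                        segs->next = NULL;      /* detach before handing it off */
                        transmit_one(segs);     /* hypothetical transmit helper */
                        segs = next;
                }
                return 0;
        }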
@@ -2600,7 +2554,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_skb_features);
 
 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
-                   struct netdev_queue *txq)
+                   struct netdev_queue *txq, bool more)
 {
        unsigned int len;
        int rc;
@@ -2610,107 +2564,125 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
 
        len = skb->len;
        trace_net_dev_start_xmit(skb, dev);
-       rc = netdev_start_xmit(skb, dev, txq);
+       rc = netdev_start_xmit(skb, dev, txq, more);
        trace_net_dev_xmit(skb, rc, dev, len);
 
        return rc;
 }
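
xmit_one() now takes a "more" flag and forwards it to netdev_start_xmit(). In this kernel generation the hint reaches the driver as skb->xmit_more, which lets it batch descriptor writes and ring the doorbell only on the last frame of a burst. A hedged driver-side sketch (all foo_* names are hypothetical):

        static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct foo_ring *ring = foo_tx_ring(dev);       /* hypothetical */

                foo_post_descriptor(ring, skb);         /* queue the frame */
                if (!skb->xmit_more)                    /* last frame of the burst? */
                        foo_ring_doorbell(ring);        /* flush to hardware */

                return NETDEV_TX_OK;
        }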
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq)
+static struct sk_buff *xmit_list(struct sk_buff *first, struct net_device *dev,
+                                struct netdev_queue *txq, int *ret)
 {
+       struct sk_buff *skb = first;
        int rc = NETDEV_TX_OK;
 
-       if (likely(!skb->next)) {
-               netdev_features_t features;
+       while (skb) {
+               struct sk_buff *next = skb->next;
 
-               /*
-                * If device doesn't need skb->dst, release it right now while
-                * its hot in this cpu cache
-                */
-               if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-                       skb_dst_drop(skb);
+               skb->next = NULL;
+               rc = xmit_one(skb, dev, txq, next != NULL);
+               if (unlikely(!dev_xmit_complete(rc))) {
+                       skb->next = next;
+                       goto out;
+               }
 
-               features = netif_skb_features(skb);
+               skb = next;
+               if (netif_xmit_stopped(txq) && skb) {
+                       rc = NETDEV_TX_BUSY;
+                       break;
+               }
+       }
 
-               if (vlan_tx_tag_present(skb) &&
-                   !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-                       skb = __vlan_put_tag(skb, skb->vlan_proto,
-                                            vlan_tx_tag_get(skb));
-                       if (unlikely(!skb))
-                               goto out;
+out:
+       *ret = rc;
+       return skb;
+}
 
+struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
+{
+       if (vlan_tx_tag_present(skb) &&
+           !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+               skb = __vlan_put_tag(skb, skb->vlan_proto,
+                                    vlan_tx_tag_get(skb));
+               if (skb)
                        skb->vlan_tci = 0;
-               }
+       }
+       return skb;
+}
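
validate_xmit_vlan() factors out software tag insertion for devices that cannot offload it for the given protocol; note that __vlan_put_tag() frees the skb and returns NULL on failure, which is why its result must replace the caller's pointer. As a rough sketch of what software insertion amounts to (simplified from the kernel's vlan_insert_tag(); assumes the MAC header sits at skb->data):

        struct vlan_ethhdr *veth;

        if (skb_cow_head(skb, VLAN_HLEN))       /* ensure writable headroom */
                return NULL;

        veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
        memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); /* shift MACs forward */
        skb->mac_header -= VLAN_HLEN;
        veth->h_vlan_proto = skb->vlan_proto;   /* e.g. htons(ETH_P_8021Q) */
        veth->h_vlan_TCI = htons(vlan_tx_tag_get(skb));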
 
-               /* If encapsulation offload request, verify we are testing
-                * hardware encapsulation features instead of standard
-                * features for the netdev
-                */
-               if (skb->encapsulation)
-                       features &= dev->hw_enc_features;
+struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+       netdev_features_t features;
 
-               if (netif_needs_gso(skb, features)) {
-                       if (unlikely(dev_gso_segment(skb, features)))
-                               goto out_kfree_skb;
-                       if (skb->next)
-                               goto gso;
-               } else {
-                       if (skb_needs_linearize(skb, features) &&
-                           __skb_linearize(skb))
-                               goto out_kfree_skb;
+       if (skb->next)
+               return skb;
 
-                       /* If packet is not checksummed and device does not
-                        * support checksumming for this protocol, complete
-                        * checksumming here.
-                        */
-                       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                               if (skb->encapsulation)
-                                       skb_set_inner_transport_header(skb,
-                                               skb_checksum_start_offset(skb));
-                               else
-                                       skb_set_transport_header(skb,
-                                               skb_checksum_start_offset(skb));
-                               if (!(features & NETIF_F_ALL_CSUM) &&
-                                    skb_checksum_help(skb))
-                                       goto out_kfree_skb;
-                       }
-               }
+       /* If device doesn't need skb->dst, release it right now while
+        * its hot in this cpu cache
+        */
+       if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+               skb_dst_drop(skb);
 
-               return xmit_one(skb, dev, txq);
-       }
+       features = netif_skb_features(skb);
+       skb = validate_xmit_vlan(skb, features);
+       if (unlikely(!skb))
+               goto out_null;
 
-gso:
-       do {
-               struct sk_buff *nskb = skb->next;
+       /* If encapsulation offload request, verify we are testing
+        * hardware encapsulation features instead of standard
+        * features for the netdev
+        */
+       if (skb->encapsulation)
+               features &= dev->hw_enc_features;
 
-               skb->next = nskb->next;
-               nskb->next = NULL;
+       if (netif_needs_gso(skb, features)) {
+               struct sk_buff *segs;
 
-               rc = xmit_one(nskb, dev, txq);
-               if (unlikely(rc != NETDEV_TX_OK)) {
-                       if (rc & ~NETDEV_TX_MASK)
-                               goto out_kfree_gso_skb;
-                       nskb->next = skb->next;
-                       skb->next = nskb;
-                       return rc;
-               }
-               if (unlikely(netif_xmit_stopped(txq) && skb->next))
-                       return NETDEV_TX_BUSY;
-       } while (skb->next);
+               segs = skb_gso_segment(skb, features);
+               kfree_skb(skb);
+               if (IS_ERR(segs))
+                       segs = NULL;
+               skb = segs;
+       } else {
+               if (skb_needs_linearize(skb, features) &&
+                   __skb_linearize(skb))
+                       goto out_kfree_skb;
 
-out_kfree_gso_skb:
-       if (likely(skb->next == NULL)) {
-               skb->destructor = DEV_GSO_CB(skb)->destructor;
-               consume_skb(skb);
-               return rc;
+               /* If packet is not checksummed and device does not
+                * support checksumming for this protocol, complete
+                * checksumming here.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       if (skb->encapsulation)
+                               skb_set_inner_transport_header(skb,
+                                                              skb_checksum_start_offset(skb));
+                       else
+                               skb_set_transport_header(skb,
+                                                        skb_checksum_start_offset(skb));
+                       if (!(features & NETIF_F_ALL_CSUM) &&
+                           skb_checksum_help(skb))
+                               goto out_kfree_skb;
+               }
        }
+
+       return skb;
+
 out_kfree_skb:
        kfree_skb(skb);
-out:
-       return rc;
+out_null:
+       return NULL;
+}
+
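
This is the heart of the change: validate_xmit_skb() calls skb_gso_segment() and immediately kfree_skb()s the original skb, so no head skb with a hijacked destructor survives segmentation. The return value supersedes the input; NULL means the packet was dropped (and already freed), otherwise the result is a single skb or the head of a segment list chained via skb->next. A hedged sketch of the caller contract (the actual call site is outside this hunk):

        skb = validate_xmit_skb(skb, dev);
        if (unlikely(!skb))
                return NET_XMIT_DROP;   /* original was already freed */

        /* skb may now head a GSO segment list chained via skb->next */
        skb = dev_hard_start_xmit(skb, dev, txq, &rc);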
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                                   struct netdev_queue *txq, int *ret)
+{
+       if (likely(!skb->next)) {
+               *ret = xmit_one(skb, dev, txq, false);
+               return skb;
+       }
+
+       return xmit_list(skb, dev, txq, ret);
 }
-EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -2922,7 +2894,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq);
+                               skb = dev_hard_start_xmit(skb, dev, txq, &rc);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);