diff --git a/net/core/dev.c b/net/core/dev.c
index 130d642202297205cd038fb2645970eab5d2b972..4699dcfdc4ab1254f106af5e4ae1451987bff7aa 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -897,23 +897,25 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 EXPORT_SYMBOL(dev_getfirstbyhwtype);
 
 /**
- *     dev_get_by_flags_rcu - find any device with given flags
+ *     __dev_get_by_flags - find any device with given flags
  *     @net: the applicable net namespace
  *     @if_flags: IFF_* values
  *     @mask: bitmask of bits in if_flags to check
  *
  *     Search for any interface with the given flags. Returns NULL if a device
  *     is not found or a pointer to the device. Must be called inside
- *     rcu_read_lock(), and result refcount is unchanged.
+ *     rtnl_lock(), and result refcount is unchanged.
  */
 
-struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
-                                   unsigned short mask)
+struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
+                                     unsigned short mask)
 {
        struct net_device *dev, *ret;
 
+       ASSERT_RTNL();
+
        ret = NULL;
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
@@ -921,7 +923,7 @@ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags
        }
        return ret;
 }
-EXPORT_SYMBOL(dev_get_by_flags_rcu);
+EXPORT_SYMBOL(__dev_get_by_flags);
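
The RTNL-locked variant keeps the matching semantics of the RCU version it
replaces; only the locking contract changes. A minimal caller sketch, assuming
RTNL is already held (find_up_device() is a hypothetical wrapper, not part of
this diff):

	static struct net_device *find_up_device(struct net *net)
	{
		struct net_device *dev;

		ASSERT_RTNL();
		/* match any interface whose IFF_UP bit is set */
		dev = __dev_get_by_flags(net, IFF_UP, IFF_UP);
		/* refcount is unchanged; dev is only valid while RTNL is held */
		return dev;
	}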
 
 /**
  *     dev_valid_name - check if name is okay for network device
@@ -2175,6 +2177,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
        return (struct dev_kfree_skb_cb *)skb->cb;
 }
 
+void netif_schedule_queue(struct netdev_queue *txq)
+{
+       rcu_read_lock();
+       if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+               struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+               __netif_schedule(q);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(netif_schedule_queue);
+
+/**
+ *     netif_wake_subqueue - allow sending packets on subqueue
+ *     @dev: network device
+ *     @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+       struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
+               struct Qdisc *q;
+
+               rcu_read_lock();
+               q = rcu_dereference(txq->qdisc);
+               __netif_schedule(q);
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL(netif_wake_subqueue);
+
+void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
+               struct Qdisc *q;
+
+               rcu_read_lock();
+               q = rcu_dereference(dev_queue->qdisc);
+               __netif_schedule(q);
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL(netif_tx_wake_queue);
+
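
All three helpers added above follow one pattern: test or clear the driver
XOFF bit, then reschedule the queue's qdisc under rcu_read_lock(), since
txq->qdisc is RCU-protected in this series. A hedged driver-side sketch of the
intended caller (my_tx_clean() and the threshold of 16 are hypothetical):

	static void my_tx_clean(struct net_device *dev, u16 qid, unsigned int freed)
	{
		/* Re-enable a subqueue we stopped earlier, once enough TX
		 * descriptors were reclaimed; netif_wake_subqueue() clears
		 * __QUEUE_STATE_DRV_XOFF and reschedules the qdisc.
		 */
		if (freed >= 16 && __netif_subqueue_stopped(dev, qid))
			netif_wake_subqueue(dev, qid);
	}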
 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
        unsigned long flags;
@@ -2371,16 +2420,6 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
-                       if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-                               int err;
-
-                               err = ptype->callbacks.gso_send_check(skb);
-                               segs = ERR_PTR(err);
-                               if (err || skb_gso_ok(skb, features))
-                                       break;
-                               __skb_push(skb, (skb->data -
-                                                skb_network_header(skb)));
-                       }
                        segs = ptype->callbacks.gso_segment(skb, features);
                        break;
                }
@@ -2483,52 +2522,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
        return 0;
 }
 
-struct dev_gso_cb {
-       void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
-       struct dev_gso_cb *cb;
-
-       kfree_skb_list(skb->next);
-       skb->next = NULL;
-
-       cb = DEV_GSO_CB(skb);
-       if (cb->destructor)
-               cb->destructor(skb);
-}
-
-/**
- *     dev_gso_segment - Perform emulated hardware segmentation on skb.
- *     @skb: buffer to segment
- *     @features: device features as applicable to this skb
- *
- *     This function segments the given skb and stores the list of segments
- *     in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
-       struct sk_buff *segs;
-
-       segs = skb_gso_segment(skb, features);
-
-       /* Verifying header integrity only. */
-       if (!segs)
-               return 0;
-
-       if (IS_ERR(segs))
-               return PTR_ERR(segs);
-
-       skb->next = segs;
-       DEV_GSO_CB(skb)->destructor = skb->destructor;
-       skb->destructor = dev_gso_skb_destructor;
-
-       return 0;
-}
-
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
@@ -2572,10 +2565,12 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
+       const struct net_device *dev = skb->dev;
+       netdev_features_t features = dev->features;
+       u16 gso_segs = skb_shinfo(skb)->gso_segs;
        __be16 protocol = skb->protocol;
-       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+       if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
@@ -2586,7 +2581,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
        }
 
        features = netdev_intersect_features(features,
-                                            skb->dev->vlan_features |
+                                            dev->vlan_features |
                                             NETIF_F_HW_VLAN_CTAG_TX |
                                             NETIF_F_HW_VLAN_STAG_TX);
 
@@ -2603,119 +2598,149 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_skb_features);
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-                       struct netdev_queue *txq)
+static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+                   struct netdev_queue *txq, bool more)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
-       int rc = NETDEV_TX_OK;
-       unsigned int skb_len;
+       unsigned int len;
+       int rc;
 
-       if (likely(!skb->next)) {
-               netdev_features_t features;
+       if (!list_empty(&ptype_all))
+               dev_queue_xmit_nit(skb, dev);
 
-               /*
-                * If device doesn't need skb->dst, release it right now while
-                * its hot in this cpu cache
-                */
-               if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-                       skb_dst_drop(skb);
+       len = skb->len;
+       trace_net_dev_start_xmit(skb, dev);
+       rc = netdev_start_xmit(skb, dev, txq, more);
+       trace_net_dev_xmit(skb, rc, dev, len);
 
-               features = netif_skb_features(skb);
-
-               if (vlan_tx_tag_present(skb) &&
-                   !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-                       skb = __vlan_put_tag(skb, skb->vlan_proto,
-                                            vlan_tx_tag_get(skb));
-                       if (unlikely(!skb))
-                               goto out;
+       return rc;
+}
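
xmit_one() routes the actual transmit through netdev_start_xmit() instead of
calling ops->ndo_start_xmit() directly; that wrapper also absorbs the
txq_trans_update() call that disappears from this file. A sketch of its
approximate shape (the real inline lives in netdevice.h in this series; this
is a reconstruction, not its exact code):

	static inline int netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
	{
		const struct net_device_ops *ops = dev->netdev_ops;
		int rc;

		rc = ops->ndo_start_xmit(skb, dev);	/* "more" becomes a TX hint */
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}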
 
-                       skb->vlan_tci = 0;
-               }
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
+                                   struct netdev_queue *txq, int *ret)
+{
+       struct sk_buff *skb = first;
+       int rc = NETDEV_TX_OK;
 
-               /* If encapsulation offload request, verify we are testing
-                * hardware encapsulation features instead of standard
-                * features for the netdev
-                */
-               if (skb->encapsulation)
-                       features &= dev->hw_enc_features;
+       while (skb) {
+               struct sk_buff *next = skb->next;
 
-               if (netif_needs_gso(skb, features)) {
-                       if (unlikely(dev_gso_segment(skb, features)))
-                               goto out_kfree_skb;
-                       if (skb->next)
-                               goto gso;
-               } else {
-                       if (skb_needs_linearize(skb, features) &&
-                           __skb_linearize(skb))
-                               goto out_kfree_skb;
+               skb->next = NULL;
+               rc = xmit_one(skb, dev, txq, next != NULL);
+               if (unlikely(!dev_xmit_complete(rc))) {
+                       skb->next = next;
+                       goto out;
+               }
 
-                       /* If packet is not checksummed and device does not
-                        * support checksumming for this protocol, complete
-                        * checksumming here.
-                        */
-                       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                               if (skb->encapsulation)
-                                       skb_set_inner_transport_header(skb,
-                                               skb_checksum_start_offset(skb));
-                               else
-                                       skb_set_transport_header(skb,
-                                               skb_checksum_start_offset(skb));
-                               if (!(features & NETIF_F_ALL_CSUM) &&
-                                    skb_checksum_help(skb))
-                                       goto out_kfree_skb;
-                       }
+               skb = next;
+               if (netif_xmit_stopped(txq) && skb) {
+                       rc = NETDEV_TX_BUSY;
+                       break;
                }
+       }
 
-               if (!list_empty(&ptype_all))
-                       dev_queue_xmit_nit(skb, dev);
+out:
+       *ret = rc;
+       return skb;
+}
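
The refactor changes the calling convention: dev_hard_start_xmit() now
consumes as much of the skb list as the device accepts and returns the unsent
remainder, with the verdict reported through *ret. A sketch of the resulting
caller-side pattern (mirroring the __dev_queue_xmit() change further down):

	int rc;

	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
	if (!dev_xmit_complete(rc)) {
		/* skb points at the first unsent segment; the caller still
		 * owns the list and must requeue or free it.
		 */
	}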
 
-               skb_len = skb->len;
-               trace_net_dev_start_xmit(skb, dev);
-               rc = ops->ndo_start_xmit(skb, dev);
-               trace_net_dev_xmit(skb, rc, dev, skb_len);
-               if (rc == NETDEV_TX_OK)
-                       txq_trans_update(txq);
-               return rc;
+static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
+                                         netdev_features_t features)
+{
+       if (vlan_tx_tag_present(skb) &&
+           !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+               skb = __vlan_put_tag(skb, skb->vlan_proto,
+                                    vlan_tx_tag_get(skb));
+               if (skb)
+                       skb->vlan_tci = 0;
        }
+       return skb;
+}
 
-gso:
-       do {
-               struct sk_buff *nskb = skb->next;
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+       netdev_features_t features;
 
-               skb->next = nskb->next;
-               nskb->next = NULL;
+       if (skb->next)
+               return skb;
 
-               if (!list_empty(&ptype_all))
-                       dev_queue_xmit_nit(nskb, dev);
-
-               skb_len = nskb->len;
-               trace_net_dev_start_xmit(nskb, dev);
-               rc = ops->ndo_start_xmit(nskb, dev);
-               trace_net_dev_xmit(nskb, rc, dev, skb_len);
-               if (unlikely(rc != NETDEV_TX_OK)) {
-                       if (rc & ~NETDEV_TX_MASK)
-                               goto out_kfree_gso_skb;
-                       nskb->next = skb->next;
-                       skb->next = nskb;
-                       return rc;
+       features = netif_skb_features(skb);
+       skb = validate_xmit_vlan(skb, features);
+       if (unlikely(!skb))
+               goto out_null;
+
+       /* If encapsulation offload request, verify we are testing
+        * hardware encapsulation features instead of standard
+        * features for the netdev
+        */
+       if (skb->encapsulation)
+               features &= dev->hw_enc_features;
+
+       if (netif_needs_gso(skb, features)) {
+               struct sk_buff *segs;
+
+               segs = skb_gso_segment(skb, features);
+               if (IS_ERR(segs)) {
+                       segs = NULL;
+               } else if (segs) {
+                       consume_skb(skb);
+                       skb = segs;
                }
-               txq_trans_update(txq);
-               if (unlikely(netif_xmit_stopped(txq) && skb->next))
-                       return NETDEV_TX_BUSY;
-       } while (skb->next);
+       } else {
+               if (skb_needs_linearize(skb, features) &&
+                   __skb_linearize(skb))
+                       goto out_kfree_skb;
 
-out_kfree_gso_skb:
-       if (likely(skb->next == NULL)) {
-               skb->destructor = DEV_GSO_CB(skb)->destructor;
-               consume_skb(skb);
-               return rc;
+               /* If packet is not checksummed and device does not
+                * support checksumming for this protocol, complete
+                * checksumming here.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       if (skb->encapsulation)
+                               skb_set_inner_transport_header(skb,
+                                                              skb_checksum_start_offset(skb));
+                       else
+                               skb_set_transport_header(skb,
+                                                        skb_checksum_start_offset(skb));
+                       if (!(features & NETIF_F_ALL_CSUM) &&
+                           skb_checksum_help(skb))
+                               goto out_kfree_skb;
+               }
        }
+
+       return skb;
+
 out_kfree_skb:
        kfree_skb(skb);
-out:
-       return rc;
+out_null:
+       return NULL;
+}
+
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+{
+       struct sk_buff *next, *head = NULL, *tail;
+
+       for (; skb != NULL; skb = next) {
+               next = skb->next;
+               skb->next = NULL;
+
+       /* in case skb won't be segmented, point to itself */
+               skb->prev = skb;
+
+               skb = validate_xmit_skb(skb, dev);
+               if (!skb)
+                       continue;
+
+               if (!head)
+                       head = skb;
+               else
+                       tail->next = skb;
+               /* If skb was segmented, skb->prev points to
+        * the last segment. If not, it still points to skb.
+                */
+               tail = skb->prev;
+       }
+       return head;
 }
-EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
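
The tail bookkeeping relies on an skb_segment() invariant: the head of a
returned segment list has ->prev pointing at its last segment, so the tail is
reachable in O(1) whether or not validate_xmit_skb() segmented the packet:

	/* unsegmented: skb->prev == skb           (set explicitly above)
	 * segmented:   skb->prev == last segment (set by skb_segment())
	 */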
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -2778,12 +2803,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                 * waiting to be sent out; and the qdisc is not running -
                 * xmit the skb directly.
                 */
-               if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-                       skb_dst_force(skb);
 
                qdisc_bstats_update(q, skb);
 
-               if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+               if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
                                contended = false;
@@ -2794,7 +2817,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
                rc = NET_XMIT_SUCCESS;
        } else {
-               skb_dst_force(skb);
                rc = q->enqueue(skb, q) & NET_XMIT_MASK;
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
@@ -2891,6 +2913,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
        skb_update_prio(skb);
 
+       /* If device/qdisc don't need skb->dst, release it right now while
+        * it's hot in this cpu cache.
+        */
+       if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+               skb_dst_drop(skb);
+       else
+               skb_dst_force(skb);
+
        txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -2923,11 +2953,15 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                        if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
                                goto recursion_alert;
 
+                       skb = validate_xmit_skb(skb, dev);
+                       if (!skb)
+                               goto drop;
+
                        HARD_TX_LOCK(dev, txq, cpu);
 
                        if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
-                               rc = dev_hard_start_xmit(skb, dev, txq);
+                               skb = dev_hard_start_xmit(skb, dev, txq, &rc);
                                __this_cpu_dec(xmit_recursion);
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
@@ -2948,10 +2982,11 @@ recursion_alert:
        }
 
        rc = -ENETDOWN;
+drop:
        rcu_read_unlock_bh();
 
        atomic_long_inc(&dev->tx_dropped);
-       kfree_skb(skb);
+       kfree_skb_list(skb);
        return rc;
 out:
        rcu_read_unlock_bh();
@@ -3128,8 +3163,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        }
 
        if (map) {
-               tcpu = map->cpus[((u64) hash * map->len) >> 32];
-
+               tcpu = map->cpus[reciprocal_scale(hash, map->len)];
                if (cpu_online(tcpu)) {
                        cpu = tcpu;
                        goto done;
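
reciprocal_scale() is a drop-in replacement for the open-coded multiply-shift
it removes here: it maps a 32-bit hash onto [0, ep_ro) without a division.
For reference, the helper is (from include/linux/kernel.h of this era):

	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		return (u32)(((u64) val * ep_ro) >> 32);
	}

which is exactly the removed "((u64) hash * map->len) >> 32" expression.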
@@ -3465,7 +3499,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 
-       q = rxq->qdisc;
+       q = rcu_dereference(rxq->qdisc);
        if (q != &noop_qdisc) {
                spin_lock(qdisc_lock(q));
                if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
@@ -3482,7 +3516,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 {
        struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
 
-       if (!rxq || rxq->qdisc == &noop_qdisc)
+       if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
                goto out;
 
        if (*pt_prev) {
@@ -3963,11 +3997,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (!(skb->dev->features & NETIF_F_GRO))
                goto normal;
 
-       if (skb_is_gso(skb) || skb_has_frag_list(skb))
+       if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
                goto normal;
 
        gro_list_prepare(napi, skb);
-       NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
@@ -3981,6 +4014,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->udp_mark = 0;
 
+               /* Setup for GRO checksum validation */
+               switch (skb->ip_summed) {
+               case CHECKSUM_COMPLETE:
+                       NAPI_GRO_CB(skb)->csum = skb->csum;
+                       NAPI_GRO_CB(skb)->csum_valid = 1;
+                       NAPI_GRO_CB(skb)->csum_cnt = 0;
+                       break;
+               case CHECKSUM_UNNECESSARY:
+                       NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+                       NAPI_GRO_CB(skb)->csum_valid = 0;
+                       break;
+               default:
+                       NAPI_GRO_CB(skb)->csum_cnt = 0;
+                       NAPI_GRO_CB(skb)->csum_valid = 0;
+               }
+
                pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
                break;
        }
@@ -4210,6 +4259,31 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_gro_frags);
 
+/* Compute the checksum from gro_offset and return the folded value
+ * after adding in any pseudo checksum.
+ */
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
+{
+       __wsum wsum;
+       __sum16 sum;
+
+       wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
+
+       /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
+       sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+       if (likely(!sum)) {
+               if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+                   !skb->csum_complete_sw)
+                       netdev_rx_csum_fault(skb->dev);
+       }
+
+       NAPI_GRO_CB(skb)->csum = wsum;
+       NAPI_GRO_CB(skb)->csum_valid = 1;
+
+       return sum;
+}
+EXPORT_SYMBOL(__skb_gro_checksum_complete);
+
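
The new csum_cnt/csum_valid fields let protocol gro_receive callbacks skip
redundant checksum work: csum_cnt counts checksum levels the hardware already
verified (CHECKSUM_UNNECESSARY), while csum_valid says NAPI_GRO_CB(skb)->csum
holds a usable CHECKSUM_COMPLETE value. A simplified sketch of the intended
consumer (the real validation helpers live in netdevice.h in this series;
this is an illustration, not their exact code):

	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* hardware already vouched for this header level: consume it */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else if (__skb_gro_checksum_complete(skb)) {
		/* software verification over the GRO region failed */
		goto flush;
	}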
 /*
  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
  * Note: called with local irq disabled, but exits with local irq enabled.
@@ -6579,6 +6653,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
+       dev->gso_min_segs = 0;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@ -6588,7 +6663,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        INIT_LIST_HEAD(&dev->adj_list.lower);
        INIT_LIST_HEAD(&dev->all_adj_list.upper);
        INIT_LIST_HEAD(&dev->all_adj_list.lower);
-       dev->priv_flags = IFF_XMIT_DST_RELEASE;
+       dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
        setup(dev);
 
        dev->num_tx_queues = txqs;
@@ -7010,53 +7085,45 @@ const char *netdev_drivername(const struct net_device *dev)
        return empty;
 }
 
-static int __netdev_printk(const char *level, const struct net_device *dev,
-                          struct va_format *vaf)
+static void __netdev_printk(const char *level, const struct net_device *dev,
+                           struct va_format *vaf)
 {
-       int r;
-
        if (dev && dev->dev.parent) {
-               r = dev_printk_emit(level[1] - '0',
-                                   dev->dev.parent,
-                                   "%s %s %s%s: %pV",
-                                   dev_driver_string(dev->dev.parent),
-                                   dev_name(dev->dev.parent),
-                                   netdev_name(dev), netdev_reg_state(dev),
-                                   vaf);
+               dev_printk_emit(level[1] - '0',
+                               dev->dev.parent,
+                               "%s %s %s%s: %pV",
+                               dev_driver_string(dev->dev.parent),
+                               dev_name(dev->dev.parent),
+                               netdev_name(dev), netdev_reg_state(dev),
+                               vaf);
        } else if (dev) {
-               r = printk("%s%s%s: %pV", level, netdev_name(dev),
-                          netdev_reg_state(dev), vaf);
+               printk("%s%s%s: %pV",
+                      level, netdev_name(dev), netdev_reg_state(dev), vaf);
        } else {
-               r = printk("%s(NULL net_device): %pV", level, vaf);
+               printk("%s(NULL net_device): %pV", level, vaf);
        }
-
-       return r;
 }
 
-int netdev_printk(const char *level, const struct net_device *dev,
-                 const char *format, ...)
+void netdev_printk(const char *level, const struct net_device *dev,
+                  const char *format, ...)
 {
        struct va_format vaf;
        va_list args;
-       int r;
 
        va_start(args, format);
 
        vaf.fmt = format;
        vaf.va = &args;
 
-       r = __netdev_printk(level, dev, &vaf);
+       __netdev_printk(level, dev, &vaf);
 
        va_end(args);
-
-       return r;
 }
 EXPORT_SYMBOL(netdev_printk);
 
 #define define_netdev_printk_level(func, level)                        \
-int func(const struct net_device *dev, const char *fmt, ...)   \
+void func(const struct net_device *dev, const char *fmt, ...)  \
 {                                                              \
-       int r;                                                  \
        struct va_format vaf;                                   \
        va_list args;                                           \
                                                                \
@@ -7065,11 +7132,9 @@ int func(const struct net_device *dev, const char *fmt, ...)     \
        vaf.fmt = fmt;                                          \
        vaf.va = &args;                                         \
                                                                \
-       r = __netdev_printk(level, dev, &vaf);                  \
+       __netdev_printk(level, dev, &vaf);                      \
                                                                \
        va_end(args);                                           \
-                                                               \
-       return r;                                               \
 }                                                              \
 EXPORT_SYMBOL(func);