diff --git a/net/core/dev.c b/net/core/dev.c
index ae00b894e67555257b1ba81dd57cecec72eed161..cc9e3652cf93a6306e6f614f966e0fe7cf17a10e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -96,6 +96,7 @@
 #include <linux/skbuff.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
+#include <net/busy_poll.h>
 #include <linux/rtnetlink.h>
 #include <linux/stat.h>
 #include <net/dst.h>
 #include <linux/errqueue.h>
 #include <linux/hrtimer.h>
 #include <linux/netfilter_ingress.h>
+#include <linux/sctp.h>
 
 #include "net-sysfs.h"
 
@@ -182,8 +184,8 @@ EXPORT_SYMBOL(dev_base_lock);
 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
-static DEFINE_HASHTABLE(napi_hash, 8);
+static unsigned int napi_gen_id = NR_CPUS;
+static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
 
@@ -1674,6 +1676,22 @@ void net_dec_ingress_queue(void)
 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
 #endif
 
+#ifdef CONFIG_NET_EGRESS
+static struct static_key egress_needed __read_mostly;
+
+void net_inc_egress_queue(void)
+{
+       static_key_slow_inc(&egress_needed);
+}
+EXPORT_SYMBOL_GPL(net_inc_egress_queue);
+
+void net_dec_egress_queue(void)
+{
+       static_key_slow_dec(&egress_needed);
+}
+EXPORT_SYMBOL_GPL(net_dec_egress_queue);
+#endif
+
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 /* We are not allowed to call static_key_slow_dec() from irq context
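The pair of exported helpers added above exists so that whoever installs an egress classifier can arm the egress_needed static key, which gates the sch_handle_egress() call added to __dev_queue_xmit() further down in this patch. A minimal sketch of that pattern, assuming a clsact-style qdisc as the user (the ops names here are hypothetical):

static int egress_cls_init(struct Qdisc *sch, struct nlattr *opt)
{
	/* Arm the static branch: from now on __dev_queue_xmit() runs
	 * sch_handle_egress() for every transmitted skb.
	 */
	net_inc_egress_queue();
	return 0;
}

static void egress_cls_destroy(struct Qdisc *sch)
{
	/* Drop the reference; the branch is patched out again once the
	 * last egress classifier user is gone.
	 */
	net_dec_egress_queue();
}
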
@@ -2470,6 +2488,141 @@ out:
 }
 EXPORT_SYMBOL(skb_checksum_help);
 
+/* __skb_csum_offload_chk - Driver helper function to determine if a device
+ * with limited checksum offload capabilities is able to offload the checksum
+ * for a given packet.
+ *
+ * Arguments:
+ *   skb - sk_buff for the packet in question
+ *   spec - contains the description of what device can offload
+ *   csum_encapped - returns true if the checksum being offloaded is
+ *           encapsulated. That is, it is the checksum for the transport
+ *           header in the inner headers.
+ *   checksum_help - when set, indicates that the helper should call
+ *           skb_checksum_help if the offload checks fail
+ *
+ * Returns:
+ *   true: Packet has passed the checksum checks and should be offloadable to
+ *        the device (a driver may still need to check for additional
+ *        restrictions of its device)
+ *   false: Checksum is not offloadable. If checksum_help was set then
+ *        skb_checksum_help was called to resolve the checksum for non-GSO
+ *        packets whose IP protocol is not SCTP
+ */
+bool __skb_csum_offload_chk(struct sk_buff *skb,
+                           const struct skb_csum_offl_spec *spec,
+                           bool *csum_encapped,
+                           bool csum_help)
+{
+       struct iphdr *iph;
+       struct ipv6hdr *ipv6;
+       void *nhdr;
+       int protocol;
+       u8 ip_proto;
+
+       if (skb->protocol == htons(ETH_P_8021Q) ||
+           skb->protocol == htons(ETH_P_8021AD)) {
+               if (!spec->vlan_okay)
+                       goto need_help;
+       }
+
+       /* We check whether the checksum refers to a transport layer checksum in
+        * the outermost header or an encapsulated transport layer checksum that
+        * corresponds to the inner headers of the skb. If the checksum is for
+        * something else in the packet we need help.
+        */
+       if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
+               /* Non-encapsulated checksum */
+               protocol = eproto_to_ipproto(vlan_get_protocol(skb));
+               nhdr = skb_network_header(skb);
+               *csum_encapped = false;
+               if (spec->no_not_encapped)
+                       goto need_help;
+       } else if (skb->encapsulation && spec->encap_okay &&
+                  skb_checksum_start_offset(skb) ==
+                  skb_inner_transport_offset(skb)) {
+               /* Encapsulated checksum */
+               *csum_encapped = true;
+               switch (skb->inner_protocol_type) {
+               case ENCAP_TYPE_ETHER:
+                       protocol = eproto_to_ipproto(skb->inner_protocol);
+                       break;
+               case ENCAP_TYPE_IPPROTO:
+                       protocol = skb->inner_protocol;
+                       break;
+               }
+               nhdr = skb_inner_network_header(skb);
+       } else {
+               goto need_help;
+       }
+
+       switch (protocol) {
+       case IPPROTO_IP:
+               if (!spec->ipv4_okay)
+                       goto need_help;
+               iph = nhdr;
+               ip_proto = iph->protocol;
+               if (iph->ihl != 5 && !spec->ip_options_okay)
+                       goto need_help;
+               break;
+       case IPPROTO_IPV6:
+               if (!spec->ipv6_okay)
+                       goto need_help;
+               if (spec->no_encapped_ipv6 && *csum_encapped)
+                       goto need_help;
+               ipv6 = nhdr;
+               nhdr += sizeof(*ipv6);
+               ip_proto = ipv6->nexthdr;
+               break;
+       default:
+               goto need_help;
+       }
+
+ip_proto_again:
+       switch (ip_proto) {
+       case IPPROTO_TCP:
+               if (!spec->tcp_okay ||
+                   skb->csum_offset != offsetof(struct tcphdr, check))
+                       goto need_help;
+               break;
+       case IPPROTO_UDP:
+               if (!spec->udp_okay ||
+                   skb->csum_offset != offsetof(struct udphdr, check))
+                       goto need_help;
+               break;
+       case IPPROTO_SCTP:
+               if (!spec->sctp_okay ||
+                   skb->csum_offset != offsetof(struct sctphdr, checksum))
+                       goto cant_help;
+               break;
+       case NEXTHDR_HOP:
+       case NEXTHDR_ROUTING:
+       case NEXTHDR_DEST: {
+               u8 *opthdr = nhdr;
+
+               if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
+                       goto need_help;
+
+               ip_proto = opthdr[0];
+               nhdr += (opthdr[1] + 1) << 3;
+
+               goto ip_proto_again;
+       }
+       default:
+               goto need_help;
+       }
+
+       /* Passed the tests for offloading checksum */
+       return true;
+
+need_help:
+       if (csum_help && !skb_shinfo(skb)->gso_size)
+               skb_checksum_help(skb);
+cant_help:
+       return false;
+}
+EXPORT_SYMBOL(__skb_csum_offload_chk);
+
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
        __be16 type = skb->protocol;
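A minimal usage sketch for the helper added above, from the point of view of a driver with limited checksum offload. The spec fields are exactly the ones tested in __skb_csum_offload_chk(); the driver function and variable names are illustrative:

static const struct skb_csum_offl_spec my_csum_spec = {
	.ipv4_okay = 1,
	.ipv6_okay = 1,
	.vlan_okay = 1,
	.tcp_okay  = 1,
	.udp_okay  = 1,
};

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool csum_encapped;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !__skb_csum_offload_chk(skb, &my_csum_spec, &csum_encapped, true)) {
		/* Not offloadable; with csum_help == true the checksum has
		 * already been resolved in software for non-GSO, non-SCTP
		 * packets, so the TX descriptor can skip checksum offload.
		 */
	}

	/* ... set up descriptors, using csum_encapped to choose between
	 * inner and outer checksum offload on the hardware ...
	 */
	return NETDEV_TX_OK;
}
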
@@ -2542,6 +2695,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  *
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
+ *
+ *     Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  */
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path)
@@ -2556,6 +2711,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                        return ERR_PTR(err);
        }
 
+       BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+                    sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
        SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
        SKB_GSO_CB(skb)->encap_level = 0;
 
@@ -2644,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
-               features &= ~NETIF_F_ALL_CSUM;
+               features &= ~NETIF_F_CSUM_MASK;
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
@@ -2791,7 +2949,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                        else
                                skb_set_transport_header(skb,
                                                         skb_checksum_start_offset(skb));
-                       if (!(features & NETIF_F_ALL_CSUM) &&
+                       if (!(features & NETIF_F_CSUM_MASK) &&
                            skb_checksum_help(skb))
                                goto out_kfree_skb;
                }
@@ -2870,7 +3028,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        bool contended;
        int rc;
 
-       qdisc_pkt_len_init(skb);
        qdisc_calculate_pkt_len(skb, q);
        /*
         * Heuristic to force contended enqueues to serialize on a
@@ -2928,7 +3085,8 @@ static void skb_update_prio(struct sk_buff *skb)
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
 
        if (!skb->priority && skb->sk && map) {
-               unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+               unsigned int prioidx =
+                       sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
 
                if (prioidx < map->priomap_len)
                        skb->priority = map->priomap[prioidx];
@@ -2962,6 +3120,49 @@ int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
+#ifdef CONFIG_NET_EGRESS
+static struct sk_buff *
+sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
+{
+       struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
+       struct tcf_result cl_res;
+
+       if (!cl)
+               return skb;
+
+       /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
+        * earlier by the caller.
+        */
+       qdisc_bstats_cpu_update(cl->q, skb);
+
+       switch (tc_classify(skb, cl, &cl_res, false)) {
+       case TC_ACT_OK:
+       case TC_ACT_RECLASSIFY:
+               skb->tc_index = TC_H_MIN(cl_res.classid);
+               break;
+       case TC_ACT_SHOT:
+               qdisc_qstats_cpu_drop(cl->q);
+               *ret = NET_XMIT_DROP;
+               goto drop;
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+               *ret = NET_XMIT_SUCCESS;
+drop:
+               kfree_skb(skb);
+               return NULL;
+       case TC_ACT_REDIRECT:
+               /* No need to push/pop skb's mac_header here on egress! */
+               skb_do_redirect(skb);
+               *ret = NET_XMIT_SUCCESS;
+               return NULL;
+       default:
+               break;
+       }
+
+       return skb;
+}
+#endif /* CONFIG_NET_EGRESS */
+
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -3021,7 +3222,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
        int queue_index = 0;
 
 #ifdef CONFIG_XPS
-       if (skb->sender_cpu == 0)
+       u32 sender_cpu = skb->sender_cpu - 1;
+
+       if (sender_cpu >= (u32)NR_CPUS)
                skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif
 
@@ -3086,6 +3289,17 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
        skb_update_prio(skb);
 
+       qdisc_pkt_len_init(skb);
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
+# ifdef CONFIG_NET_EGRESS
+       if (static_key_false(&egress_needed)) {
+               skb = sch_handle_egress(skb, &rc, dev);
+               if (!skb)
+                       goto out;
+       }
+# endif
+#endif
        /* If device/qdisc don't need skb->dst, release it right now while
         * its hot in this cpu cache.
         */
@@ -3107,9 +3321,6 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
 
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
-#endif
        trace_net_dev_queue(skb);
        if (q->enqueue) {
                rc = __dev_xmit_skb(skb, q, dev, txq);
@@ -3666,9 +3877,9 @@ int (*br_fdb_test_addr_hook)(struct net_device *dev,
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-static inline struct sk_buff *handle_ing(struct sk_buff *skb,
-                                        struct packet_type **pt_prev,
-                                        int *ret, struct net_device *orig_dev)
+static inline struct sk_buff *
+sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
+                  struct net_device *orig_dev)
 {
 #ifdef CONFIG_NET_CLS_ACT
        struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
@@ -3862,7 +4073,7 @@ another_round:
 skip_taps:
 #ifdef CONFIG_NET_INGRESS
        if (static_key_false(&ingress_needed)) {
-               skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+               skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
                if (!skb)
                        goto out;
 
@@ -4353,6 +4564,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+       skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);
 
        skb_gro_reset_offset(skb);
@@ -4386,7 +4598,10 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 
        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
-               napi->skb = skb;
+               if (skb) {
+                       napi->skb = skb;
+                       skb_mark_napi_id(skb, napi);
+               }
        }
        return skb;
 }
@@ -4661,7 +4876,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
 EXPORT_SYMBOL(napi_complete_done);
 
 /* must be called under rcu_read_lock(), as we dont take a reference */
-struct napi_struct *napi_by_id(unsigned int napi_id)
+static struct napi_struct *napi_by_id(unsigned int napi_id)
 {
        unsigned int hash = napi_id % HASH_SIZE(napi_hash);
        struct napi_struct *napi;
@@ -4672,43 +4887,101 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(napi_by_id);
 
-void napi_hash_add(struct napi_struct *napi)
+#if defined(CONFIG_NET_RX_BUSY_POLL)
+#define BUSY_POLL_BUDGET 8
+bool sk_busy_loop(struct sock *sk, int nonblock)
 {
-       if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+       unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+       int (*busy_poll)(struct napi_struct *dev);
+       struct napi_struct *napi;
+       int rc = false;
 
-               spin_lock(&napi_hash_lock);
+       rcu_read_lock();
 
-               /* 0 is not a valid id, we also skip an id that is taken
-                * we expect both events to be extremely rare
-                */
-               napi->napi_id = 0;
-               while (!napi->napi_id) {
-                       napi->napi_id = ++napi_gen_id;
-                       if (napi_by_id(napi->napi_id))
-                               napi->napi_id = 0;
+       napi = napi_by_id(sk->sk_napi_id);
+       if (!napi)
+               goto out;
+
+       /* Note: ndo_busy_poll method is optional in linux-4.5 */
+       busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
+
+       do {
+               rc = 0;
+               local_bh_disable();
+               if (busy_poll) {
+                       rc = busy_poll(napi);
+               } else if (napi_schedule_prep(napi)) {
+                       void *have = netpoll_poll_lock(napi);
+
+                       if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+                               rc = napi->poll(napi, BUSY_POLL_BUDGET);
+                               trace_napi_poll(napi);
+                               if (rc == BUSY_POLL_BUDGET) {
+                                       napi_complete_done(napi, rc);
+                                       napi_schedule(napi);
+                               }
+                       }
+                       netpoll_poll_unlock(have);
                }
+               if (rc > 0)
+                       NET_ADD_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+               local_bh_enable();
 
-               hlist_add_head_rcu(&napi->napi_hash_node,
-                       &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+               if (rc == LL_FLUSH_FAILED)
+                       break; /* permanent failure */
 
-               spin_unlock(&napi_hash_lock);
-       }
+               cpu_relax();
+       } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+                !need_resched() && !busy_loop_timeout(end_time));
+
+       rc = !skb_queue_empty(&sk->sk_receive_queue);
+out:
+       rcu_read_unlock();
+       return rc;
+}
+EXPORT_SYMBOL(sk_busy_loop);
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
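For reference, a hedged sketch of the consumer side: a protocol's blocking receive path can spin on the socket's recorded NAPI id before sleeping. Only sk_busy_loop() is defined here; the sk_can_busy_loop() guard is assumed to come from net/busy_poll.h:

	/* in a recvmsg-style path, before sleeping on an empty queue */
	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
		sk_busy_loop(sk, flags & MSG_DONTWAIT);
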
+
+void napi_hash_add(struct napi_struct *napi)
+{
+       if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
+           test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+               return;
+
+       spin_lock(&napi_hash_lock);
+
+       /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+       do {
+               if (unlikely(++napi_gen_id < NR_CPUS + 1))
+                       napi_gen_id = NR_CPUS + 1;
+       } while (napi_by_id(napi_gen_id));
+       napi->napi_id = napi_gen_id;
+
+       hlist_add_head_rcu(&napi->napi_hash_node,
+                          &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+
+       spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
 
 /* Warning : caller is responsible to make sure rcu grace period
  * is respected before freeing memory containing @napi
  */
-void napi_hash_del(struct napi_struct *napi)
+bool napi_hash_del(struct napi_struct *napi)
 {
+       bool rcu_sync_needed = false;
+
        spin_lock(&napi_hash_lock);
 
-       if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
+       if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
+               rcu_sync_needed = true;
                hlist_del_rcu(&napi->napi_hash_node);
-
+       }
        spin_unlock(&napi_hash_lock);
+       return rcu_sync_needed;
 }
 EXPORT_SYMBOL_GPL(napi_hash_del);
 
@@ -4744,6 +5017,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        napi->poll_owner = -1;
 #endif
        set_bit(NAPI_STATE_SCHED, &napi->state);
+       napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);
 
@@ -4763,8 +5037,12 @@ void napi_disable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_disable);
 
+/* Must be called in process context */
 void netif_napi_del(struct napi_struct *napi)
 {
+       might_sleep();
+       if (napi_hash_del(napi))
+               synchronize_net();
        list_del_init(&napi->dev_list);
        napi_free_frags(napi);
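Together with the netif_napi_add() change above (which now calls napi_hash_add() itself), NAPI lifecycle handling in a driver reduces to the sketch below; both teardown calls must run in process context. The dev/priv names are illustrative:

	/* driver teardown path */
	unregister_netdev(dev);
	netif_napi_del(&priv->napi);	/* unhashes and, if it was hashed,
					 * waits for an RCU grace period */
	free_netdev(dev);		/* likewise process context only, see
					 * the might_sleep() added below */
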
 
@@ -5351,7 +5629,7 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 
 static int __netdev_upper_dev_link(struct net_device *dev,
                                   struct net_device *upper_dev, bool master,
-                                  void *private)
+                                  void *upper_priv, void *upper_info)
 {
        struct netdev_notifier_changeupper_info changeupper_info;
        struct netdev_adjacent *i, *j, *to_i, *to_j;
@@ -5375,6 +5653,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        changeupper_info.upper_dev = upper_dev;
        changeupper_info.master = master;
        changeupper_info.linking = true;
+       changeupper_info.upper_info = upper_info;
 
        ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
                                            &changeupper_info.info);
@@ -5382,7 +5661,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (ret)
                return ret;
 
-       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
                                                   master);
        if (ret)
                return ret;
@@ -5420,8 +5699,12 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                        goto rollback_lower_mesh;
        }
 
-       call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
-                                     &changeupper_info.info);
+       ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+                                           &changeupper_info.info);
+       ret = notifier_to_errno(ret);
+       if (ret)
+               goto rollback_lower_mesh;
+
        return 0;
 
 rollback_lower_mesh:
@@ -5475,7 +5758,7 @@ rollback_mesh:
 int netdev_upper_dev_link(struct net_device *dev,
                          struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
+       return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_link);
 
@@ -5483,6 +5766,8 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
  * netdev_master_upper_dev_link - Add a master link to the upper device
  * @dev: device
  * @upper_dev: new upper device
+ * @upper_priv: upper device private
+ * @upper_info: upper info to be passed down via notifier
  *
  * Adds a link to device which is upper to this one. In this case, only
  * one master upper device can be linked, although other non-master devices
@@ -5491,20 +5776,14 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
  * counts are adjusted and the function returns zero.
  */
 int netdev_master_upper_dev_link(struct net_device *dev,
-                                struct net_device *upper_dev)
+                                struct net_device *upper_dev,
+                                void *upper_priv, void *upper_info)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
+       return __netdev_upper_dev_link(dev, upper_dev, true,
+                                      upper_priv, upper_info);
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_link);
 
-int netdev_master_upper_dev_link_private(struct net_device *dev,
-                                        struct net_device *upper_dev,
-                                        void *private)
-{
-       return __netdev_upper_dev_link(dev, upper_dev, true, private);
-}
-EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
-
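A hedged sketch of the updated linking call for a bonding/team-style master, replacing the removed netdev_master_upper_dev_link_private() above. The info structure and variable names are illustrative; upper_info is simply forwarded to the NETDEV_PRECHANGEUPPER/NETDEV_CHANGEUPPER listeners:

	struct my_lag_upper_info {	/* hypothetical, driver-defined */
		int tx_type;
	} info = { .tx_type = 0 };
	int err;

	err = netdev_master_upper_dev_link(slave_dev, bond_dev,
					   slave /* upper_priv */,
					   &info /* upper_info */);
	if (err)
		return err;
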
 /**
  * netdev_upper_dev_unlink - Removes a link to upper device
  * @dev: device
@@ -5663,7 +5942,7 @@ EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
 int dev_get_nest_level(struct net_device *dev,
-                      bool (*type_check)(struct net_device *dev))
+                      bool (*type_check)(const struct net_device *dev))
 {
        struct net_device *lower = NULL;
        struct list_head *iter;
@@ -5685,6 +5964,26 @@ int dev_get_nest_level(struct net_device *dev,
 }
 EXPORT_SYMBOL(dev_get_nest_level);
 
+/**
+ * netdev_lower_state_changed - Dispatch event about lower device state change
+ * @lower_dev: device
+ * @lower_state_info: state to dispatch
+ *
+ * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_lower_state_changed(struct net_device *lower_dev,
+                               void *lower_state_info)
+{
+       struct netdev_notifier_changelowerstate_info changelowerstate_info;
+
+       ASSERT_RTNL();
+       changelowerstate_info.lower_state_info = lower_state_info;
+       call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
+                                     &changelowerstate_info.info);
+}
+EXPORT_SYMBOL(netdev_lower_state_changed);
+
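A short caller sketch: a master driver reports that one of its lower (port) devices changed state, and listeners receive the blob through the NETDEV_CHANGELOWERSTATE notifier as lower_state_info. The state structure is driver-defined and purely illustrative:

	struct my_lower_state {		/* hypothetical, driver-defined */
		bool link_up;
		bool tx_enabled;
	} state = { .link_up = true, .tx_enabled = true };

	/* runs under RTNL, as netdev_lower_state_changed() asserts */
	netdev_lower_state_changed(port_dev, &state);
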
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -6375,9 +6674,9 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
        /* UFO needs SG and checksumming */
        if (features & NETIF_F_UFO) {
                /* maybe split UFO into V4 and V6? */
-               if (!((features & NETIF_F_GEN_CSUM) ||
-                   (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
-                           == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+               if (!(features & NETIF_F_HW_CSUM) &&
+                   ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
+                    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
                        netdev_dbg(dev,
                                "Dropping NETIF_F_UFO since no checksum offload features.\n");
                        features &= ~NETIF_F_UFO;
@@ -7164,11 +7463,13 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
  *     This function does the last stage of destroying an allocated device
  *     interface. The reference to the device object is released.
  *     If this is the last reference then it will be freed.
+ *     Must be called in process context.
  */
 void free_netdev(struct net_device *dev)
 {
        struct napi_struct *p, *n;
 
+       might_sleep();
        netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
        kvfree(dev->_rx);
@@ -7477,16 +7778,16 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 netdev_features_t netdev_increment_features(netdev_features_t all,
        netdev_features_t one, netdev_features_t mask)
 {
-       if (mask & NETIF_F_GEN_CSUM)
-               mask |= NETIF_F_ALL_CSUM;
+       if (mask & NETIF_F_HW_CSUM)
+               mask |= NETIF_F_CSUM_MASK;
        mask |= NETIF_F_VLAN_CHALLENGED;
 
-       all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
+       all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;
 
        /* If one device supports hw checksumming, set for all. */
-       if (all & NETIF_F_GEN_CSUM)
-               all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+       if (all & NETIF_F_HW_CSUM)
+               all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
 
        return all;
 }