git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 18 Feb 2015 01:41:19 +0000 (17:41 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 18 Feb 2015 01:41:19 +0000 (17:41 -0800)
Pull networking updates from David Miller:

 1) Missing netlink attribute validation in nft_lookup, from Patrick
    McHardy.

 2) Restrict ipv6 partial checksum handling to UDP, since that's the
    only case it works for.  From Vlad Yasevich.

 3) Clear out silly device table sentinel macros used by SSB and BCMA
    drivers.  From Joe Perches.

 4) Make sure the remote checksum code never creates a situation where
    the remote checksum is applied yet the tunneling metadata describing
    the remote checksum transformation is still present.  Otherwise an
    external entity might see this and apply the checksum again.  From
    Tom Herbert.

 5) Use msecs_to_jiffies() where applicable, from Nicholas Mc Guire.

 6) Don't explicitly initialize timer struct fields, use setup_timer()
    and mod_timer() instead.  From Vaishali Thakkar.  (A sketch of this
    idiom follows the list.)

 7) Don't invoke tg3_halt() without the tp->lock held, from Jun'ichi
    Nomura.

 8) Missing __percpu annotation in ipvlan driver, from Eric Dumazet.

 9) Don't potentially perform skb_get() on shared skbs, also from Eric
    Dumazet.  (See the clone-vs-get sketch after this list.)

10) Fix COW'ing of metrics for non-DST_HOST routes in ipv6, from Martin
    KaFai Lau.

11) Fix merge resolution error between the iov_iter changes in vhost and
    some bug fixes that occurred at the same time.  From Jason Wang.

12) If rtnl_configure_link() fails we have to perform a call to
    ->dellink() before unregistering the device.  From WANG Cong.
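
A minimal sketch of the timer idiom behind items 5 and 6: setup_timer() and
mod_timer() with msecs_to_jiffies() instead of open-coded field assignments.
Everything here except the kernel APIs is made up for illustration; my_priv,
my_poll_timeout and POLL_MS do not come from any hunk below.

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

#define POLL_MS 1000			/* illustrative polling period */

struct my_priv {			/* illustrative private data */
	struct timer_list poll_timer;
};

static void my_poll_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct my_priv *priv = netdev_priv(dev);

	/* ... do the periodic work ..., then re-arm for the next interval */
	mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(POLL_MS));
}

static void my_start_polling(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* replaces init_timer() plus manual .function/.data/.expires
	 * assignments followed by add_timer()
	 */
	setup_timer(&priv->poll_timer, my_poll_timeout, (unsigned long)dev);
	mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(POLL_MS));
}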
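
The shared-skb rule from item 9, distilled from the net/ipv4/tcp_fastopen.c
hunk further down; grab_skb_for_queueing() is a hypothetical helper name, not
an existing kernel function.

#include <linux/skbuff.h>

/* A shared skb has other users holding references, so it must not be
 * modified in place (header pulls, owner changes); take a private clone
 * instead.  If we are the only user, a plain refcount bump via skb_get()
 * is enough.
 */
static struct sk_buff *grab_skb_for_queueing(struct sk_buff *skb)
{
	if (unlikely(skb_shared(skb)))
		return skb_clone(skb, GFP_ATOMIC);	/* may return NULL */

	return skb_get(skb);
}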

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (39 commits)
  net: dsa: Set valid phy interface type
  rtnetlink: call ->dellink on failure when ->newlink exists
  com20020-pci: add support for eae single card
  vhost_net: fix wrong iter offset when setting number of buffers
  net: spelling fixes
  net/core: Fix warning while make xmldocs caused by dev.c
  net: phy: micrel: disable NAND-tree for KSZ8021, KSZ8031, KSZ8051, KSZ8081
  ipv6: fix ipv6_cow_metrics for non DST_HOST case
  openvswitch: Fix key serialization.
  r8152: restore hw settings
  hso: fix rx parsing logic when skb allocation fails
  tcp: make sure skb is not shared before using skb_get()
  bridge: netfilter: Move sysctl-specific error code inside #ifdef
  ipv6: fix possible deadlock in ip6_fl_purge / ip6_fl_gc
  ipvlan: add a missing __percpu pcpu_stats
  tg3: Hold tp->lock before calling tg3_halt() from tg3_init_one()
  bgmac: fix device initialization on Northstar SoCs (condition typo)
  qlcnic: Delete existing multicast MAC list before adding new
  net/mlx5_core: Fix configuration of log_uar_page_sz
  sunvnet: don't change gso data on clones
  ...

52 files changed:
drivers/net/arcnet/com20020-pci.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ipvlan/ipvlan.h
drivers/net/phy/micrel.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/rtlwifi/pci.c
drivers/spi/spi-bcm53xx.c
drivers/ssb/driver_gige.c
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ssb-hcd.c
drivers/vhost/net.c
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/skbuff.h
include/net/checksum.h
include/net/vxlan.h
include/uapi/linux/fou.h
include/uapi/linux/if_link.h
net/bridge/br_netfilter.c
net/core/dev.c
net/core/filter.c
net/core/pktgen.c
net/core/rtnetlink.c
net/dsa/slave.c
net/ipv4/devinet.c
net/ipv4/fou.c
net/ipv4/tcp_fastopen.c
net/ipv4/udp_offload.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/udp_offload.c
net/netfilter/nft_compat.c
net/netfilter/nft_lookup.c
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/rds/cong.c

diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 945f532078e97d15b42ea9a5f5163e3ccc53a6c7..96edc1346124eb95b29c863f53359ae327348e2d 100644 (file)
@@ -214,8 +214,17 @@ static struct com20020_pci_card_info card_info_sohard = {
        .flags = ARC_CAN_10MBIT,
 };
 
-static struct com20020_pci_card_info card_info_eae = {
-       .name = "EAE PLX-PCI",
+static struct com20020_pci_card_info card_info_eae_arc1 = {
+       .name = "EAE PLX-PCI ARC1",
+       .devcount = 1,
+       .chan_map_tbl = {
+               { 2, 0x00, 0x08 },
+       },
+       .flags = ARC_CAN_10MBIT,
+};
+
+static struct com20020_pci_card_info card_info_eae_ma1 = {
+       .name = "EAE PLX-PCI MA1",
        .devcount = 2,
        .chan_map_tbl = {
                { 2, 0x00, 0x08 },
@@ -357,11 +366,17 @@ static const struct pci_device_id com20020pci_id_table[] = {
                0, 0,
                (kernel_ulong_t)&card_info_sohard
        },
+       {
+               0x10B5, 0x9050,
+               0x10B5, 0x3263,
+               0, 0,
+               (kernel_ulong_t)&card_info_eae_arc1
+       },
        {
                0x10B5, 0x9050,
                0x10B5, 0x3292,
                0, 0,
-               (kernel_ulong_t)&card_info_eae
+               (kernel_ulong_t)&card_info_eae_ma1
        },
        {
                0x14BA, 0x6000,
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index f18647c2355990ba0e55b6a2690c0cdbb819da43..c5a320507556a83adf4595ef5a6071e81a056848 100644 (file)
@@ -518,11 +518,8 @@ static int el3_open(struct net_device *dev)
        netif_start_queue(dev);
 
        tc589_reset(dev);
-       init_timer(&lp->media);
-       lp->media.function = media_check;
-       lp->media.data = (unsigned long) dev;
-       lp->media.expires = jiffies + HZ;
-       add_timer(&lp->media);
+       setup_timer(&lp->media, media_check, (unsigned long)dev);
+       mod_timer(&lp->media, jiffies + HZ);
 
        dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
          dev->name, inw(dev->base_addr + EL3_STATUS));
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 384dc163851b3dd28bc51977306d4e1bec962575..e0f3d197e7f226ba693eb641ead31b948376fae8 100644 (file)
@@ -3127,7 +3127,8 @@ static void et131x_error_timer_handler(unsigned long data)
        }
 
        /* This is a periodic timer, so reschedule */
-       mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
+       mod_timer(&adapter->error_timer, jiffies +
+                 msecs_to_jiffies(TX_ERROR_PERIOD));
 }
 
 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
@@ -3647,7 +3648,8 @@ static int et131x_open(struct net_device *netdev)
 
        /* Start the timer to track NIC errors */
        init_timer(&adapter->error_timer);
-       adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
+       adapter->error_timer.expires = jiffies +
+               msecs_to_jiffies(TX_ERROR_PERIOD);
        adapter->error_timer.function = et131x_error_timer_handler;
        adapter->error_timer.data = (unsigned long)adapter;
        add_timer(&adapter->error_timer);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 44b15373d6b3e628a0198f888c163a5cbc1b9544..4de62b210c85bab8e3d172234f24381da7a66276 100644 (file)
@@ -1030,12 +1030,14 @@ static const struct acpi_device_id xgene_enet_acpi_match[] = {
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 #endif
 
+#ifdef CONFIG_OF
 static struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",},
        {},
 };
 
 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+#endif
 
 static struct platform_driver xgene_enet_driver = {
        .driver = {
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index d86d6baf9681f2cb637591f3830440e602a35cd7..bd5916a60cb5bb7bb84d2b96aa370c22a611cb80 100644 (file)
@@ -121,7 +121,7 @@ static struct pci_driver b44_pci_driver = {
 
 static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
-       SSB_DEVTABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
 
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 3007d95fbb9f69c460a83d57bc66c7488421a627..676ffe09318073e33b707d46423bdcf780451750 100644 (file)
@@ -21,7 +21,7 @@
 static const struct bcma_device_id bgmac_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
-       BCMA_CORETABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
 
@@ -1412,6 +1412,7 @@ static void bgmac_mii_unregister(struct bgmac *bgmac)
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
 static int bgmac_probe(struct bcma_device *core)
 {
+       struct bcma_chipinfo *ci = &core->bus->chipinfo;
        struct net_device *net_dev;
        struct bgmac *bgmac;
        struct ssb_sprom *sprom = &core->bus->sprom;
@@ -1474,8 +1475,8 @@ static int bgmac_probe(struct bcma_device *core)
        bgmac_chip_reset(bgmac);
 
        /* For Northstar, we have to take all GMAC core out of reset */
-       if (core->id.id == BCMA_CHIP_ID_BCM4707 ||
-           core->id.id == BCMA_CHIP_ID_BCM53018) {
+       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+           ci->id == BCMA_CHIP_ID_BCM53018) {
                struct bcma_device *ns_core;
                int ns_gmac;
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 615a6dbde04797651d29e566abfc64b82e1cae1f..23a019cee279af1e502d05dc5f2363372d35b2ce 100644 (file)
@@ -17855,8 +17855,10 @@ static int tg3_init_one(struct pci_dev *pdev,
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+               tg3_full_lock(tp, 0);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+               tg3_full_unlock(tp);
        }
 
        err = tg3_test_dma(tp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d6651937d8996188b249fe911b09e9cbbdc480a5..5394a848655876c2d0435ed6f8934363a060c303 100644 (file)
@@ -291,6 +291,7 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
        MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
        MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
        MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
+       MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
        v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
        *flags_off = cpu_to_be64(v64);
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index e56c1bb361412144e2fa60cc6b2b21b3ecb35245..fa4317611fd63fe81df2e23e47fa307b8c5c5348 100644 (file)
@@ -848,10 +848,17 @@ struct qlcnic_cardrsp_tx_ctx {
 #define QLCNIC_MAC_VLAN_ADD    3
 #define QLCNIC_MAC_VLAN_DEL    4
 
+enum qlcnic_mac_type {
+       QLCNIC_UNICAST_MAC,
+       QLCNIC_MULTICAST_MAC,
+       QLCNIC_BROADCAST_MAC,
+};
+
 struct qlcnic_mac_vlan_list {
        struct list_head list;
        uint8_t mac_addr[ETH_ALEN+2];
        u16 vlan_id;
+       enum qlcnic_mac_type mac_type;
 };
 
 /* MAC Learn */
@@ -1615,7 +1622,9 @@ void qlcnic_watchdog_task(struct work_struct *work);
 void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
                struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
 void qlcnic_set_multi(struct net_device *netdev);
-int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16,
+                      enum qlcnic_mac_type);
 int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
 int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 69b46c051cc0c299feb3db576dca2056463f3ba5..3e0f705a43117d51926dde8d3b63ed3af3f3e6fd 100644 (file)
@@ -487,7 +487,8 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
        return err;
 }
 
-int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
+                      enum qlcnic_mac_type mac_type)
 {
        struct qlcnic_mac_vlan_list *cur;
        struct list_head *head;
@@ -513,10 +514,29 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan)
        }
 
        cur->vlan_id = vlan;
+       cur->mac_type = mac_type;
+
        list_add_tail(&cur->list, &adapter->mac_list);
        return 0;
 }
 
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter)
+{
+       struct qlcnic_mac_vlan_list *cur;
+       struct list_head *head, *tmp;
+
+       list_for_each_safe(head, tmp, &adapter->mac_list) {
+               cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+               if (cur->mac_type != QLCNIC_MULTICAST_MAC)
+                       continue;
+
+               qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+                                         cur->vlan_id, QLCNIC_MAC_DEL);
+               list_del(&cur->list);
+               kfree(cur);
+       }
+}
+
 static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -530,8 +550,9 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
                return;
 
-       qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan);
-       qlcnic_nic_add_mac(adapter, bcast_addr, vlan);
+       qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan,
+                          QLCNIC_UNICAST_MAC);
+       qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC);
 
        if (netdev->flags & IFF_PROMISC) {
                if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
@@ -540,8 +561,10 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
        } else if (!netdev_mc_empty(netdev)) {
+               qlcnic_flush_mcast_mac(adapter);
                netdev_for_each_mc_addr(ha, netdev)
-                       qlcnic_nic_add_mac(adapter, ha->addr, vlan);
+                       qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+                                          QLCNIC_MULTICAST_MAC);
        }
 
        /* configure unicast MAC address, if there is not sufficient space
@@ -551,7 +574,8 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
                mode = VPORT_MISS_MODE_ACCEPT_ALL;
        } else if (!netdev_uc_empty(netdev)) {
                netdev_for_each_uc_addr(ha, netdev)
-                       qlcnic_nic_add_mac(adapter, ha->addr, vlan);
+                       qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+                                          QLCNIC_UNICAST_MAC);
        }
 
        if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 1659c804f1d5f4fc530f7d9b2ac11c7f9f023c6b..e6312465fe4584e0dd4f90b8f914c5e0088ffeaa 100644 (file)
@@ -1489,7 +1489,8 @@ out:
        return ret;
 }
 
-static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
+                                 enum qlcnic_mac_type mac_type)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1500,17 +1501,18 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
        vf = &adapter->ahw->sriov->vf_info[0];
 
        if (!qlcnic_sriov_check_any_vlan(vf)) {
-               qlcnic_nic_add_mac(adapter, mac, 0);
+               qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
        } else {
                spin_lock(&vf->vlan_list_lock);
                for (i = 0; i < sriov->num_allowed_vlans; i++) {
                        vlan_id = vf->sriov_vlans[i];
                        if (vlan_id)
-                               qlcnic_nic_add_mac(adapter, mac, vlan_id);
+                               qlcnic_nic_add_mac(adapter, mac, vlan_id,
+                                                  mac_type);
                }
                spin_unlock(&vf->vlan_list_lock);
                if (qlcnic_84xx_check(adapter))
-                       qlcnic_nic_add_mac(adapter, mac, 0);
+                       qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
        }
 }
 
@@ -1549,10 +1551,12 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
        } else {
-               qlcnic_vf_add_mc_list(netdev, bcast_addr);
+               qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
                if (!netdev_mc_empty(netdev)) {
+                       qlcnic_flush_mcast_mac(adapter);
                        netdev_for_each_mc_addr(ha, netdev)
-                               qlcnic_vf_add_mc_list(netdev, ha->addr);
+                               qlcnic_vf_add_mc_list(netdev, ha->addr,
+                                                     QLCNIC_MULTICAST_MAC);
                }
        }
 
@@ -1563,7 +1567,8 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
                mode = VPORT_MISS_MODE_ACCEPT_ALL;
        } else if (!netdev_uc_empty(netdev)) {
                netdev_for_each_uc_addr(ha, netdev)
-                       qlcnic_vf_add_mc_list(netdev, ha->addr);
+                       qlcnic_vf_add_mc_list(netdev, ha->addr,
+                                             QLCNIC_UNICAST_MAC);
        }
 
        if (adapter->pdev->is_virtfn) {
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2b10b85d8a0881ba584cbdf3ea337edbea0c448f..22e0cad1b4b5a21e0edc9215781c1052a9c6ef84 100644 (file)
@@ -1192,23 +1192,16 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
        skb_pull(skb, maclen);
 
        if (port->tso && gso_size < datalen) {
+               if (skb_unclone(skb, GFP_ATOMIC))
+                       goto out_dropped;
+
                /* segment to TSO size */
                skb_shinfo(skb)->gso_size = datalen;
                skb_shinfo(skb)->gso_segs = gso_segs;
-
-               segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
-
-               /* restore gso_size & gso_segs */
-               skb_shinfo(skb)->gso_size = gso_size;
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
-                                                        gso_size);
-       } else
-               segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
-       if (IS_ERR(segs)) {
-               dev->stats.tx_dropped++;
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
        }
+       segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+       if (IS_ERR(segs))
+               goto out_dropped;
 
        skb_push(skb, maclen);
        skb_reset_mac_header(skb);
@@ -1246,6 +1239,10 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
        if (!(status & NETDEV_TX_MASK))
                dev_kfree_skb_any(skb);
        return status;
+out_dropped:
+       dev->stats.tx_dropped++;
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
 }
 
 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 2729f64b3e7e4b5a99887ec787b61d76fd1f54cd..924ea98bd5311b1c7197a9fe4e9b475b095069ce 100644 (file)
@@ -67,7 +67,7 @@ struct ipvl_dev {
        struct list_head        addrs;
        int                     ipv4cnt;
        int                     ipv6cnt;
-       struct ipvl_pcpu_stats  *pcpu_stats;
+       struct ipvl_pcpu_stats  __percpu *pcpu_stats;
        DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
        netdev_features_t       sfeatures;
        u32                     msg_enable;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ad8ca76196d8254afd41359a437a7716defeecb..1190fd8f008862bc8f70f271575839d280a8f906 100644 (file)
@@ -32,6 +32,7 @@
 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO                                0x16
 #define KSZPHY_OMSO_B_CAST_OFF                 BIT(9)
+#define KSZPHY_OMSO_NAND_TREE_ON               BIT(5)
 #define KSZPHY_OMSO_RMII_OVERRIDE              BIT(1)
 #define KSZPHY_OMSO_MII_OVERRIDE               BIT(0)
 
@@ -76,6 +77,7 @@ struct kszphy_type {
        u32 led_mode_reg;
        u16 interrupt_level_mask;
        bool has_broadcast_disable;
+       bool has_nand_tree_disable;
        bool has_rmii_ref_clk_sel;
 };
 
@@ -89,6 +91,7 @@ struct kszphy_priv {
 static const struct kszphy_type ksz8021_type = {
        .led_mode_reg           = MII_KSZPHY_CTRL_2,
        .has_broadcast_disable  = true,
+       .has_nand_tree_disable  = true,
        .has_rmii_ref_clk_sel   = true,
 };
 
@@ -98,11 +101,13 @@ static const struct kszphy_type ksz8041_type = {
 
 static const struct kszphy_type ksz8051_type = {
        .led_mode_reg           = MII_KSZPHY_CTRL_2,
+       .has_nand_tree_disable  = true,
 };
 
 static const struct kszphy_type ksz8081_type = {
        .led_mode_reg           = MII_KSZPHY_CTRL_2,
        .has_broadcast_disable  = true,
+       .has_nand_tree_disable  = true,
        .has_rmii_ref_clk_sel   = true,
 };
 
@@ -231,6 +236,26 @@ out:
        return ret;
 }
 
+static int kszphy_nand_tree_disable(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = phy_read(phydev, MII_KSZPHY_OMSO);
+       if (ret < 0)
+               goto out;
+
+       if (!(ret & KSZPHY_OMSO_NAND_TREE_ON))
+               return 0;
+
+       ret = phy_write(phydev, MII_KSZPHY_OMSO,
+                       ret & ~KSZPHY_OMSO_NAND_TREE_ON);
+out:
+       if (ret)
+               dev_err(&phydev->dev, "failed to disable NAND tree mode\n");
+
+       return ret;
+}
+
 static int kszphy_config_init(struct phy_device *phydev)
 {
        struct kszphy_priv *priv = phydev->priv;
@@ -245,6 +270,9 @@ static int kszphy_config_init(struct phy_device *phydev)
        if (type->has_broadcast_disable)
                kszphy_broadcast_disable(phydev);
 
+       if (type->has_nand_tree_disable)
+               kszphy_nand_tree_disable(phydev);
+
        if (priv->rmii_ref_clk_sel) {
                ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
                if (ret) {
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6b8efcabb816459336be87dda7f9a40536067c46..9cdfb3fe9c156ba775d41a9d6d343ddbb5fc9b60 100644 (file)
@@ -914,7 +914,7 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
                                        /* We got no receive buffer. */
                                        D1("could not allocate memory");
                                        odev->rx_parse_state = WAIT_SYNC;
-                                       return;
+                                       continue;
                                }
 
                                /* Copy what we got so far. make room for iphdr
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5980ac6c48dd47bbf00e8c6367c8bf7d9f66aa8c..438fc6bcaef15538e9c151c46fce08dc67cec1a5 100644 (file)
@@ -40,6 +40,7 @@
 #define PLA_RXFIFO_CTRL0       0xc0a0
 #define PLA_RXFIFO_CTRL1       0xc0a4
 #define PLA_RXFIFO_CTRL2       0xc0a8
+#define PLA_DMY_REG0           0xc0b0
 #define PLA_FMC                        0xc0b4
 #define PLA_CFG_WOL            0xc0b6
 #define PLA_TEREDO_CFG         0xc0bc
 #define PLA_BP_7               0xfc36
 #define PLA_BP_EN              0xfc38
 
+#define USB_USB2PHY            0xb41e
+#define USB_SSPHYLINK2         0xb428
 #define USB_U2P3_CTRL          0xb460
+#define USB_CSR_DUMMY1         0xb464
+#define USB_CSR_DUMMY2         0xb466
 #define USB_DEV_STAT           0xb808
+#define USB_CONNECT_TIMER      0xcbf8
+#define USB_BURST_SIZE         0xcfc0
 #define USB_USB_CTRL           0xd406
 #define USB_PHY_CTRL           0xd408
 #define USB_TX_AGG             0xd40a
 #define TXFIFO_THR_NORMAL      0x00400008
 #define TXFIFO_THR_NORMAL2     0x01000008
 
+/* PLA_DMY_REG0 */
+#define ECM_ALDPS              0x0002
+
 /* PLA_FMC */
 #define FMC_FCR_MCU_EN         0x0001
 
 /* PLA_BOOT_CTRL */
 #define AUTOLOAD_DONE          0x0002
 
+/* USB_USB2PHY */
+#define USB2PHY_SUSPEND                0x0001
+#define USB2PHY_L1             0x0002
+
+/* USB_SSPHYLINK2 */
+#define pwd_dn_scale_mask      0x3ffe
+#define pwd_dn_scale(x)                ((x) << 1)
+
+/* USB_CSR_DUMMY1 */
+#define DYNAMIC_BURST          0x0001
+
+/* USB_CSR_DUMMY2 */
+#define EP4_FULL_FC            0x0001
+
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK                0x0006
 #define STAT_SPEED_HIGH                0x0000
 #define TIMER11_EN             0x0001
 
 /* USB_LPM_CTRL */
+/* bit 4 ~ 5: fifo empty boundary */
+#define FIFO_EMPTY_1FB         0x30    /* 0x1fb * 64 = 32448 bytes */
+/* bit 2 ~ 3: LMP timer */
 #define LPM_TIMER_MASK         0x0c
 #define LPM_TIMER_500MS                0x04    /* 500 ms */
 #define LPM_TIMER_500US                0x0c    /* 500 us */
+#define ROK_EXIT_LPM           0x02
 
 /* USB_AFE_CTRL2 */
 #define SEN_VAL_MASK           0xf800
@@ -3230,6 +3258,32 @@ static void r8153_init(struct r8152 *tp)
 
        r8153_u2p3en(tp, false);
 
+       if (tp->version == RTL_VER_04) {
+               ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2);
+               ocp_data &= ~pwd_dn_scale_mask;
+               ocp_data |= pwd_dn_scale(96);
+               ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, ocp_data);
+
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
+               ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
+               ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);
+       } else if (tp->version == RTL_VER_05) {
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0);
+               ocp_data &= ~ECM_ALDPS;
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data);
+
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+               if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+                       ocp_data &= ~DYNAMIC_BURST;
+               else
+                       ocp_data |= DYNAMIC_BURST;
+               ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+       }
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
+       ocp_data |= EP4_FULL_FC;
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2, ocp_data);
+
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
        ocp_data &= ~TIMER11_EN;
        ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
@@ -3238,8 +3292,7 @@ static void r8153_init(struct r8152 *tp)
        ocp_data &= ~LED_MODE_MASK;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
 
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL);
-       ocp_data &= ~LPM_TIMER_MASK;
+       ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
        if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
                ocp_data |= LPM_TIMER_500MS;
        else
@@ -3251,6 +3304,8 @@ static void r8153_init(struct r8152 *tp)
        ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
        ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
 
+       ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
+
        r8153_power_cut_en(tp, false);
        r8153_u1u2en(tp, true);
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0e57e862c399d8c3c436aca06ab724068537a27d..1e0a775ea882995d88127e4d3c2a8f3f0afb8d60 100644 (file)
@@ -555,12 +555,13 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                                          unsigned int off,
                                          struct vxlanhdr *vh, size_t hdrlen,
-                                         u32 data)
+                                         u32 data, struct gro_remcsum *grc,
+                                         bool nopartial)
 {
        size_t start, offset, plen;
 
        if (skb->remcsum_offload)
-               return vh;
+               return NULL;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
@@ -579,7 +580,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
                        return NULL;
        }
 
-       skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+       skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
+                               start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -597,6 +599,9 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
        struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
                                             udp_offloads);
        u32 flags;
+       struct gro_remcsum grc;
+
+       skb_gro_remcsum_init(&grc);
 
        off_vx = skb_gro_offset(skb);
        hlen = off_vx + sizeof(*vh);
@@ -614,7 +619,9 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 
        if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
                vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
-                                      ntohl(vh->vx_vni));
+                                      ntohl(vh->vx_vni), &grc,
+                                      !!(vs->flags &
+                                         VXLAN_F_REMCSUM_NOPARTIAL));
 
                if (!vh)
                        goto out;
@@ -637,6 +644,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
        pp = eth_gro_receive(head, skb);
 
 out:
+       skb_gro_remcsum_cleanup(skb, &grc);
        NAPI_GRO_CB(skb)->flush |= flush;
 
        return pp;
@@ -1150,16 +1158,10 @@ static void vxlan_igmp_leave(struct work_struct *work)
 }
 
 static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
-                                     size_t hdrlen, u32 data)
+                                     size_t hdrlen, u32 data, bool nopartial)
 {
        size_t start, offset, plen;
 
-       if (skb->remcsum_offload) {
-               /* Already processed in GRO path */
-               skb->remcsum_offload = 0;
-               return vh;
-       }
-
        start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
        offset = start + ((data & VXLAN_RCO_UDP) ?
                          offsetof(struct udphdr, check) :
@@ -1172,7 +1174,8 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
 
        vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
 
-       skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset);
+       skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
+                           nopartial);
 
        return vh;
 }
@@ -1209,7 +1212,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                goto drop;
 
        if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
-               vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni);
+               vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
+                                   !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
                if (!vxh)
                        goto drop;
 
@@ -2438,6 +2442,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
        [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
        [IFLA_VXLAN_GBP]        = { .type = NLA_FLAG, },
+       [IFLA_VXLAN_REMCSUM_NOPARTIAL]  = { .type = NLA_FLAG },
 };
 
 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2761,6 +2766,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (data[IFLA_VXLAN_GBP])
                vxlan->flags |= VXLAN_F_GBP;
 
+       if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+               vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+
        if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
                           vxlan->dst_port, vxlan->flags)) {
                pr_info("duplicate VNI %u\n", vni);
@@ -2910,6 +2918,10 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_flag(skb, IFLA_VXLAN_GBP))
                goto nla_put_failure;
 
+       if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
+           nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 2c9088633ec6f0ca593405038131d8df7970e56c..ccbdb05b28cd7e2dc457afe9443e35ac2ca3fc21 100644 (file)
@@ -127,7 +127,7 @@ static const struct bcma_device_id b43_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1E, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x28, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x2A, BCMA_ANY_CLASS),
-       BCMA_CORETABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(bcma, b43_bcma_tbl);
 #endif
@@ -144,7 +144,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
-       SSB_DEVTABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl);
 #endif
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1aec2146a2bfb5ecfe3fe6d8452236799058d9c8..4e58c0069830b698b0689203172a9a8f4d458e4b 100644 (file)
@@ -86,7 +86,7 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for the firmware files to load.");
 static const struct ssb_device_id b43legacy_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2),
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4),
-       SSB_DEVTABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl);
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index f95b524422816431db9ef522dba3c815ab822273..48135063347e4388586085a15127429501ec467a 100644 (file)
@@ -99,7 +99,7 @@ static struct bcma_device_id brcms_coreid_table[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 17, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
-       BCMA_CORETABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
 
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index ec456f0d972eb583f4f2bbfda84472680b2513f0..a62170ea04818e37790eeec99fc2047e255b030f 100644 (file)
@@ -822,11 +822,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 
                /* get a new skb - if fail, old one will be reused */
                new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
-               if (unlikely(!new_skb)) {
-                       pr_err("Allocation of new skb failed in %s\n",
-                              __func__);
+               if (unlikely(!new_skb))
                        goto no_new;
-               }
                if (rtlpriv->use_new_trx_flow) {
                        buffer_desc =
                          &rtlpci->rx_ring[rxring_idx].buffer_desc
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 17b34cbadc03fc2744caf6cb6cf55b22683e2aa0..3fb91c81015a39a670afc818d90a9f3c8ae54521 100644 (file)
@@ -216,7 +216,7 @@ static struct spi_board_info bcm53xx_info = {
 
 static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
-       BCMA_CORETABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl);
 
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index 21f71a1581fa6cefcd3e055e4001aa259ee92729..e9734051e3c4adaedaa4131948355d5dd6fd1e00 100644 (file)
@@ -24,7 +24,7 @@ MODULE_LICENSE("GPL");
 
 static const struct ssb_device_id ssb_gige_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET_GBIT, SSB_ANY_REV),
-       SSB_DEVTABLE_END
+       {},
 };
 /* MODULE_DEVICE_TABLE(ssb, ssb_gige_tbl); */
 
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index cd6d0afb6b8f7869d40520223093380159ba91ff..526cfab41d5f7022b4c62c3754a5eb69a30240f3 100644 (file)
@@ -306,7 +306,7 @@ static int bcma_hcd_resume(struct bcma_device *dev)
 
 static const struct bcma_device_id bcma_hcd_table[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
-       BCMA_CORETABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
 
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 0196f766df734f48352fd8c5217d08963aab444f..ffc32f4b1b1b869992cd11dafab9cad977b1dd18 100644 (file)
@@ -251,7 +251,7 @@ static const struct ssb_device_id ssb_hcd_table[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV),
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV),
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV),
-       SSB_DEVTABLE_END
+       {},
 };
 MODULE_DEVICE_TABLE(ssb, ssb_hcd_table);
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8dccca9013ed3fae55f1a716f85e1785f277a3f4..afa06d28725dad3960aed1df9dd4f9e9ddc53493 100644 (file)
@@ -528,9 +528,9 @@ static void handle_rx(struct vhost_net *net)
                .msg_controllen = 0,
                .msg_flags = MSG_DONTWAIT,
        };
-       struct virtio_net_hdr_mrg_rxbuf hdr = {
-               .hdr.flags = 0,
-               .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+       struct virtio_net_hdr hdr = {
+               .flags = 0,
+               .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
@@ -539,6 +539,7 @@ static void handle_rx(struct vhost_net *net)
        size_t vhost_len, sock_len;
        struct socket *sock;
        struct iov_iter fixup;
+       __virtio16 num_buffers;
 
        mutex_lock(&vq->mutex);
        sock = vq->private_data;
@@ -616,9 +617,9 @@ static void handle_rx(struct vhost_net *net)
                }
                /* TODO: Should check and handle checksum. */
 
-               hdr.num_buffers = cpu_to_vhost16(vq, headcount);
+               num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) {
+                   copy_to_iter(&num_buffers, 2, &fixup) != 2) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 2e75ab00dbf2b981f7b7dc22d19f926a8e8e6a9e..e530533b94be218fde581a3578f8541052d855ed 100644 (file)
@@ -364,8 +364,6 @@ struct ssb_device_id {
 } __attribute__((packed, aligned(2)));
 #define SSB_DEVICE(_vendor, _coreid, _revision)  \
        { .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
-#define SSB_DEVTABLE_END  \
-       { 0, },
 
 #define SSB_ANY_VENDOR         0xFFFF
 #define SSB_ANY_ID             0xFFFF
@@ -380,8 +378,6 @@ struct bcma_device_id {
 } __attribute__((packed,aligned(2)));
 #define BCMA_CORE(_manuf, _id, _rev, _class)  \
        { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
-#define BCMA_CORETABLE_END  \
-       { 0, },
 
 #define BCMA_ANY_MANUF         0xFFFF
 #define BCMA_ANY_ID            0xFFFF
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d115256ed5a209fe28ce971bdbde7ca421d048aa..5897b4ea5a3f9e0f07f511f57d8fa8bfe7019205 100644 (file)
@@ -1923,13 +1923,8 @@ struct napi_gro_cb {
        /* Number of segments aggregated. */
        u16     count;
 
-       /* This is non-zero if the packet may be of the same flow. */
-       u8      same_flow;
-
-       /* Free the skb? */
-       u8      free;
-#define NAPI_GRO_FREE            1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
+       /* Start offset for remote checksum offload */
+       u16     gro_remcsum_start;
 
        /* jiffies when first packet was created/queued */
        unsigned long age;
@@ -1937,6 +1932,9 @@ struct napi_gro_cb {
        /* Used in ipv6_gro_receive() and foo-over-udp */
        u16     proto;
 
+       /* This is non-zero if the packet may be of the same flow. */
+       u8      same_flow:1;
+
        /* Used in udp_gro_receive */
        u8      udp_mark:1;
 
@@ -1946,9 +1944,16 @@ struct napi_gro_cb {
        /* Number of checksums via CHECKSUM_UNNECESSARY */
        u8      csum_cnt:3;
 
+       /* Free the skb? */
+       u8      free:2;
+#define NAPI_GRO_FREE            1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
+       /* 7 bit hole */
+
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
 
@@ -2242,11 +2247,20 @@ static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
 
 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
 
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+       return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
+               skb_gro_offset(skb));
+}
+
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
                                                      bool zero_okay,
                                                      __sum16 check)
 {
-       return (skb->ip_summed != CHECKSUM_PARTIAL &&
+       return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+               skb_checksum_start_offset(skb) <
+                skb_gro_offset(skb)) &&
+               !skb_at_gro_remcsum_start(skb) &&
                NAPI_GRO_CB(skb)->csum_cnt == 0 &&
                (!zero_okay || check));
 }
@@ -2321,20 +2335,48 @@ do {                                                                    \
                                           compute_pseudo(skb, proto)); \
 } while (0)
 
+struct gro_remcsum {
+       int offset;
+       __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+       grc->delta = 0;
+}
+
 static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
-                                          int start, int offset)
+                                          int start, int offset,
+                                          struct gro_remcsum *grc,
+                                          bool nopartial)
 {
        __wsum delta;
 
        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 
+       if (!nopartial) {
+               NAPI_GRO_CB(skb)->gro_remcsum_start =
+                   ((unsigned char *)ptr + start) - skb->head;
+               return;
+       }
+
        delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
 
        /* Adjust skb->csum since we changed the packet */
-       skb->csum = csum_add(skb->csum, delta);
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+       grc->offset = (ptr + offset) - (void *)skb->head;
+       grc->delta = delta;
 }
 
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+                                          struct gro_remcsum *grc)
+{
+       if (!grc->delta)
+               return;
+
+       remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+}
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1bb36edb66b96572bd361c392715ee2d166a2b6b..30007afe70b3541cdb4300919c2a5cfbbc0063f2 100644 (file)
  *
  * CHECKSUM_PARTIAL:
  *
- *   This is identical to the case for output below. This may occur on a packet
+ *   A checksum is set up to be offloaded to a device as described in the
+ *   output description for CHECKSUM_PARTIAL. This may occur on a packet
  *   received directly from another Linux OS, e.g., a virtualized Linux kernel
- *   on the same host. The packet can be treated in the same way as
- *   CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the
- *   checksum must be filled in by the OS or the hardware.
+ *   on the same host, or it may be set in the input path in GRO or remote
+ *   checksum offload. For the purposes of checksum verification, the checksum
+ *   referred to by skb->csum_start + skb->csum_offset and any preceding
+ *   checksums in the packet are considered verified. Any checksums in the
+ *   packet that are after the checksum being offloaded are not considered to
+ *   be verified.
  *
  * B. Checksumming on output.
  *
@@ -2915,7 +2919,10 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
-       return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid);
+       return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
+               skb->csum_valid ||
+               (skb->ip_summed == CHECKSUM_PARTIAL &&
+                skb_checksum_start_offset(skb) >= 0));
 }
 
 /**
@@ -3097,16 +3104,29 @@ do {                                                                    \
                                       compute_pseudo(skb, proto));     \
 } while (0)
 
+static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
+                                             u16 start, u16 offset)
+{
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
+       skb->csum_offset = offset - start;
+}
+
 /* Update skbuf and packet to reflect the remote checksum offload operation.
  * When called, ptr indicates the starting point for skb->csum when
  * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete
  * here, skb_postpull_rcsum is done so skb->csum start is ptr.
  */
 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
-                                      int start, int offset)
+                                      int start, int offset, bool nopartial)
 {
        __wsum delta;
 
+       if (!nopartial) {
+               skb_remcsum_adjust_partial(skb, ptr, start, offset);
+               return;
+       }
+
         if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
                __skb_checksum_complete(skb);
                skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
diff --git a/include/net/checksum.h b/include/net/checksum.h
index e339a9513e2963ea9f5bfbf77a2851836d36cddc..0a55ac715077d0aad820916f9daf979ae3dfd0fd 100644 (file)
@@ -167,4 +167,9 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
        return delta;
 }
 
+static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+{
+       *psum = csum_fold(csum_sub(delta, *psum));
+}
+
 #endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 2927d6244481ae24092ba829f857e57fffcaacfd..eabd3a038674dd00f69263b47828134dd3a24bf1 100644 (file)
@@ -128,13 +128,15 @@ struct vxlan_sock {
 #define VXLAN_F_REMCSUM_TX             0x200
 #define VXLAN_F_REMCSUM_RX             0x400
 #define VXLAN_F_GBP                    0x800
+#define VXLAN_F_REMCSUM_NOPARTIAL      0x1000
 
 /* Flags that are used in the receive patch. These flags must match in
  * order for a socket to be shareable
  */
 #define VXLAN_F_RCV_FLAGS              (VXLAN_F_GBP |                  \
                                         VXLAN_F_UDP_ZERO_CSUM6_RX |    \
-                                        VXLAN_F_REMCSUM_RX)
+                                        VXLAN_F_REMCSUM_RX |           \
+                                        VXLAN_F_REMCSUM_NOPARTIAL)
 
 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                                  vxlan_rcv_t *rcv, void *data,
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index 8df06894da23dac0233bc8e7b0da28b693aba20f..c303588bb7670dcf3f90edf4fa0427f27ff1d1de 100644 (file)
@@ -14,6 +14,7 @@ enum {
        FOU_ATTR_AF,                            /* u8 */
        FOU_ATTR_IPPROTO,                       /* u8 */
        FOU_ATTR_TYPE,                          /* u8 */
+       FOU_ATTR_REMCSUM_NOPARTIAL,             /* flag */
 
        __FOU_ATTR_MAX,
 };
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 0deee3eeddbf0e4a2b7af099b61c670381970cab..dfd0bb22e554e7d7ac2cb45e2c4021b901346f38 100644 (file)
@@ -374,6 +374,7 @@ enum {
        IFLA_VXLAN_REMCSUM_TX,
        IFLA_VXLAN_REMCSUM_RX,
        IFLA_VXLAN_GBP,
+       IFLA_VXLAN_REMCSUM_NOPARTIAL,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 65728e0dc4ffd011fe87440510b76d34aa1b6ecb..0ee453fad3de652142ddce3b6af6f1c3bb52e95a 100644 (file)
@@ -987,15 +987,12 @@ static int __init br_netfilter_init(void)
        if (brnf_sysctl_header == NULL) {
                printk(KERN_WARNING
                       "br_netfilter: can't register to sysctl.\n");
-               ret = -ENOMEM;
-               goto err1;
+               nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+               return -ENOMEM;
        }
 #endif
        printk(KERN_NOTICE "Bridge firewalling registered\n");
        return 0;
-err1:
-       nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
-       return ret;
 }
 
 static void __exit br_netfilter_fini(void)
diff --git a/net/core/dev.c b/net/core/dev.c
index d030575532a22a778170bcdbff8dac8566cdf120..8f9710c62e20d58bcdcec3d184ca6344fbe5a57c 100644 (file)
@@ -4024,6 +4024,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->udp_mark = 0;
+               NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
                /* Setup for GRO checksum validation */
                switch (skb->ip_summed) {
@@ -5335,7 +5336,7 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink);
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
- * @netdev_bonding_info: info to dispatch
+ * @bonding_info: info to dispatch
  *
  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
  * The caller must hold the RTNL lock.
diff --git a/net/core/filter.c b/net/core/filter.c
index ec9baea10c16c78200ec1809a5491f3aa01e83de..f6bdc2b1ba01295a53be71b4043437a82848d1c6 100644 (file)
@@ -531,7 +531,7 @@ do_pass:
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;
 
-               /* Unkown instruction. */
+               /* Unknown instruction. */
                default:
                        goto err;
                }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9fa25b0ea1450f3e8a30c2afb6fe9fe623a7bacd..b4899f5b7388e8f0c825a433a1f633d6b087a0f9 100644 (file)
@@ -97,7 +97,7 @@
  * New xmit() return, do_div and misc clean up by Stephen Hemminger
  * <shemminger@osdl.org> 040923
  *
- * Randy Dunlap fixed u64 printk compiler waring
+ * Randy Dunlap fixed u64 printk compiler warning
  *
  * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
  * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5be499b6a2d22711716b6e095344e313ed60c8fd..ab293a3066b34bc4f6af71701f0c12b9ab6e5a34 100644 (file)
@@ -2162,7 +2162,14 @@ replay:
                }
                err = rtnl_configure_link(dev, ifm);
                if (err < 0) {
-                       unregister_netdevice(dev);
+                       if (ops->newlink) {
+                               LIST_HEAD(list_kill);
+
+                               ops->dellink(dev, &list_kill);
+                               unregister_netdevice_many(&list_kill);
+                       } else {
+                               unregister_netdevice(dev);
+                       }
                        goto out;
                }
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index d104ae15836fe801beaeb10a9c9f031a3413e4ec..f23deadf42a070a251a7fc0c8f7b4d0a1e767dc5 100644 (file)
@@ -521,10 +521,13 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
        struct device_node *phy_dn, *port_dn;
        bool phy_is_fixed = false;
        u32 phy_flags = 0;
-       int ret;
+       int mode, ret;
 
        port_dn = cd->port_dn[p->port];
-       p->phy_interface = of_get_phy_mode(port_dn);
+       mode = of_get_phy_mode(port_dn);
+       if (mode < 0)
+               mode = PHY_INTERFACE_MODE_NA;
+       p->phy_interface = mode;
 
        phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
        if (of_phy_is_fixed_link(port_dn)) {
@@ -559,6 +562,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
                if (!p->phy)
                        return -ENODEV;
 
+               /* Use already configured phy mode */
+               p->phy_interface = p->phy->interface;
                phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
                                   p->phy_interface);
        } else {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f0b4a31d7bd6d23b114600ad454fa79c25945fe7..3a8985c94581823b1dc8c81279cc1c1fe59d999b 100644 (file)
@@ -1186,7 +1186,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
 no_in_dev:
 
        /* Not loopback addresses on loopback should be preferred
-          in this case. It is importnat that lo is the first interface
+          in this case. It is important that lo is the first interface
           in dev_base list.
         */
        for_each_netdev_rcu(net, dev) {
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 92ddea1e645732118d982685c4bab5b2d6c03641..ff069f6597ace6302b46add285c787942645aac2 100644 (file)
@@ -22,14 +22,18 @@ static LIST_HEAD(fou_list);
 struct fou {
        struct socket *sock;
        u8 protocol;
+       u8 flags;
        u16 port;
        struct udp_offload udp_offloads;
        struct list_head list;
 };
 
+#define FOU_F_REMCSUM_NOPARTIAL BIT(0)
+
 struct fou_cfg {
        u16 type;
        u8 protocol;
+       u8 flags;
        struct udp_port_cfg udp_config;
 };
 
@@ -64,24 +68,20 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
 }
 
 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
-                                 void *data, size_t hdrlen, u8 ipproto)
+                                 void *data, size_t hdrlen, u8 ipproto,
+                                 bool nopartial)
 {
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
        size_t offset = ntohs(pd[1]);
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
-       if (skb->remcsum_offload) {
-               /* Already processed in GRO path */
-               skb->remcsum_offload = 0;
-               return guehdr;
-       }
-
        if (!pskb_may_pull(skb, plen))
                return NULL;
        guehdr = (struct guehdr *)&udp_hdr(skb)[1];
 
-       skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
+       skb_remcsum_process(skb, (void *)guehdr + hdrlen,
+                           start, offset, nopartial);
 
        return guehdr;
 }
@@ -142,7 +142,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 
                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_remcsum(skb, guehdr, data + doffset,
-                                            hdrlen, guehdr->proto_ctype);
+                                            hdrlen, guehdr->proto_ctype,
+                                            !!(fou->flags &
+                                               FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto drop;
 
@@ -214,7 +216,8 @@ out_unlock:
 
 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                                      struct guehdr *guehdr, void *data,
-                                     size_t hdrlen, u8 ipproto)
+                                     size_t hdrlen, u8 ipproto,
+                                     struct gro_remcsum *grc, bool nopartial)
 {
        __be16 *pd = data;
        size_t start = ntohs(pd[0]);
@@ -222,7 +225,7 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
        if (skb->remcsum_offload)
-               return guehdr;
+               return NULL;
 
        if (!NAPI_GRO_CB(skb)->csum_valid)
                return NULL;
@@ -234,7 +237,8 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
                        return NULL;
        }
 
-       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset);
+       skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
+                               start, offset, grc, nopartial);
 
        skb->remcsum_offload = 1;
 
@@ -254,6 +258,10 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        void *data;
        u16 doffset = 0;
        int flush = 1;
+       struct fou *fou = container_of(uoff, struct fou, udp_offloads);
+       struct gro_remcsum grc;
+
+       skb_gro_remcsum_init(&grc);
 
        off = skb_gro_offset(skb);
        len = off + sizeof(*guehdr);
@@ -295,7 +303,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
                if (flags & GUE_PFLAG_REMCSUM) {
                        guehdr = gue_gro_remcsum(skb, off, guehdr,
                                                 data + doffset, hdrlen,
-                                                guehdr->proto_ctype);
+                                                guehdr->proto_ctype, &grc,
+                                                !!(fou->flags &
+                                                   FOU_F_REMCSUM_NOPARTIAL));
                        if (!guehdr)
                                goto out;
 
@@ -345,6 +355,7 @@ out_unlock:
        rcu_read_unlock();
 out:
        NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, &grc);
 
        return pp;
 }
@@ -455,6 +466,7 @@ static int fou_create(struct net *net, struct fou_cfg *cfg,
 
        sk = sock->sk;
 
+       fou->flags = cfg->flags;
        fou->port = cfg->udp_config.local_udp_port;
 
        /* Initial for fou type */
@@ -541,6 +553,7 @@ static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_AF] = { .type = NLA_U8, },
        [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
        [FOU_ATTR_TYPE] = { .type = NLA_U8, },
+       [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
 };
 
 static int parse_nl_config(struct genl_info *info,
@@ -571,6 +584,9 @@ static int parse_nl_config(struct genl_info *info,
        if (info->attrs[FOU_ATTR_TYPE])
                cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);
 
+       if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
+               cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;
+
        return 0;
 }
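
The new FOU_ATTR_REMCSUM_NOPARTIAL attribute is an NLA_FLAG, i.e. presence-only: the policy entry lets the netlink core validate it, and the parser simply checks whether the attribute pointer was filled in. A hedged, generic sketch of that same pattern follows; the MYFAM_* names are made up and are not part of the FOU interface:

#include <net/genetlink.h>

enum { MYFAM_ATTR_UNSPEC, MYFAM_ATTR_NOPARTIAL, __MYFAM_ATTR_MAX };
#define MYFAM_ATTR_MAX (__MYFAM_ATTR_MAX - 1)
#define MYFAM_F_NOPARTIAL 0x1

static const struct nla_policy myfam_policy[MYFAM_ATTR_MAX + 1] = {
        [MYFAM_ATTR_NOPARTIAL] = { .type = NLA_FLAG, },
};

static int myfam_parse(struct genl_info *info, u32 *flags)
{
        /* A flag attribute carries no payload; its presence sets the bit. */
        if (info->attrs[MYFAM_ATTR_NOPARTIAL])
                *flags |= MYFAM_F_NOPARTIAL;
        return 0;
}
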
 
index 53db2c30957263fbfe6224e6934a4f49aaa1d3f5..ea82fd492c1bf4788ffb5dce8275f653d066b2e4 100644 (file)
@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
+       u32 end_seq;
 
        req->num_retrans = 0;
        req->num_timeout = 0;
@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 
        /* Queue the data carried in the SYN packet. We need to first
         * bump skb's refcnt because the caller will attempt to free it.
+        * Note that IPv6 might also have used the skb_get() trick
+        * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts),
+        * so we may eventually need a clone of the packet before
+        * inserting it in sk_receive_queue.
         *
         * XXX (TFO) - we honor a zero-payload TFO request for now,
         * (any reason not to?) but no need to queue the skb since
         * there is no data. How about SYN+FIN?
         */
-       if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
-               skb = skb_get(skb);
-               skb_dst_drop(skb);
-               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
-               skb_set_owner_r(skb, child);
-               __skb_queue_tail(&child->sk_receive_queue, skb);
-               tp->syn_data_acked = 1;
+       end_seq = TCP_SKB_CB(skb)->end_seq;
+       if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
+               struct sk_buff *skb2;
+
+               if (unlikely(skb_shared(skb)))
+                       skb2 = skb_clone(skb, GFP_ATOMIC);
+               else
+                       skb2 = skb_get(skb);
+
+               if (likely(skb2)) {
+                       skb_dst_drop(skb2);
+                       __skb_pull(skb2, tcp_hdrlen(skb));
+                       skb_set_owner_r(skb2, child);
+                       __skb_queue_tail(&child->sk_receive_queue, skb2);
+                       tp->syn_data_acked = 1;
+               } else {
+                       end_seq = TCP_SKB_CB(skb)->seq + 1;
+               }
        }
-       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
        sk->sk_data_ready(sk);
        bh_unlock_sock(child);
        sock_put(child);
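
The point of this hunk is that skb_get() only bumps the refcount and hands back the same buffer, which is unsafe to pull and queue while another holder (such as IPv6's treq->pktopts) still references it; a clone is taken in that case instead. A condensed sketch of the decision, assuming GFP_ATOMIC context (the helper name is hypothetical):

#include <linux/skbuff.h>

/* Return a private reference to skb suitable for modifying and queueing:
 * clone when the buffer is shared, otherwise just take a reference.
 * Returns NULL on allocation failure. Hypothetical helper, not kernel API. */
static struct sk_buff *tfo_private_copy(struct sk_buff *skb)
{
        if (unlikely(skb_shared(skb)))
                return skb_clone(skb, GFP_ATOMIC);  /* new header, shared data */
        return skb_get(skb);                        /* same buffer, +1 refcount */
}
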
index d10f6f4ead27918c3e7c31b754ec977518791d70..4915d8284a86f9ec6e5536804b24398d205e2a2b 100644 (file)
@@ -402,6 +402,13 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
        }
 
        rcu_read_unlock();
+
+       if (skb->remcsum_offload)
+               skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+
+       skb->encapsulation = 1;
+       skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
+
        return err;
 }
 
@@ -410,9 +417,13 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
        const struct iphdr *iph = ip_hdr(skb);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
-       if (uh->check)
+       if (uh->check) {
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
                                          iph->daddr, 0);
+       } else {
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+       }
 
        return udp_gro_complete(skb, nhoff);
 }
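
Seeding uh->check with ~udp_v4_check(...) stores the complement of the pseudo-header sum so that later segmentation/offload code only has to fold in the payload; the arithmetic underneath is the standard Internet ones'-complement checksum. A generic, standalone illustration of that checksum (RFC 1071 style; this is not the kernel's csum implementation):

#include <stddef.h>
#include <stdint.h>

/* Ones'-complement sum over a buffer, big-endian word order. */
static uint16_t inet_checksum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {
                sum += ((uint32_t)data[0] << 8) | data[1];
                data += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte, zero-padded */
                sum += (uint32_t)data[0] << 8;
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
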
index 2f780cba6e1240b06e4fc64eb20c949ab3646558..f45d6db50a454727367d2fc2450fd6f0b1dfb923 100644 (file)
@@ -172,7 +172,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
 {
        int i;
 
-       spin_lock(&ip6_fl_lock);
+       spin_lock_bh(&ip6_fl_lock);
        for (i = 0; i <= FL_HASH_MASK; i++) {
                struct ip6_flowlabel *fl;
                struct ip6_flowlabel __rcu **flp;
@@ -190,7 +190,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
                        flp = &fl->next;
                }
        }
-       spin_unlock(&ip6_fl_lock);
+       spin_unlock_bh(&ip6_fl_lock);
 }
 
 static struct ip6_flowlabel *fl_intern(struct net *net,
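
The purge path switches to the _bh lock variants because the flow-label garbage collector also takes ip6_fl_lock from timer (softirq) context; taking the plain spin_lock from process context could deadlock if the timer fires on the same CPU while the lock is held. A minimal sketch of the pattern, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Process-context path: must disable bottom halves while holding the lock
 * so the softirq user below cannot preempt us on this CPU. */
static void process_context_purge(void)
{
        spin_lock_bh(&example_lock);
        /* ... walk and free entries ... */
        spin_unlock_bh(&example_lock);
}

/* Timer/softirq path: already runs in BH context, plain lock is enough. */
static void timer_context_gc(unsigned long data)
{
        spin_lock(&example_lock);
        /* ... expire stale entries ... */
        spin_unlock(&example_lock);
}
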
index d33df4cbd8720fa6a86a065b8e439b0362ea4356..7deebf102cbafc276f45e4eaffdd8efdb658d842 100644 (file)
@@ -1273,7 +1273,7 @@ emsgsize:
        /* If this is the first and only packet and device
         * supports checksum offloading, let's use it.
         */
-       if (!skb &&
+       if (!skb && sk->sk_protocol == IPPROTO_UDP &&
            length + fragheaderlen < mtu &&
            rt->dst.dev->features & NETIF_F_V6_CSUM &&
            !exthdrlen)
index 98565ce0ebcdff78da7c0f0c11737bbb8c2dee59..4688bd4d7f59587eaf12e91a33bdc81379fd32ea 100644 (file)
@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
        u32 *p = NULL;
 
        if (!(rt->dst.flags & DST_HOST))
-               return NULL;
+               return dst_cow_metrics_generic(dst, old);
 
        peer = rt6_get_peer_create(rt);
        if (peer) {
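
Before this fix, non-DST_HOST routes returned NULL here, so their shared read-only metrics could never be written; falling back to dst_cow_metrics_generic() gives such routes a private, writable copy on first write. A standalone copy-on-write sketch of the idea (conceptual only, not the kernel's dst metrics code):

#include <stdlib.h>
#include <string.h>

#define N_METRICS 16

static const unsigned int default_metrics[N_METRICS]; /* shared, read-only */

struct route_like {
        const unsigned int *metrics;  /* may point at the shared defaults */
        int metrics_owned;            /* nonzero once we hold a private copy */
};

/* Copy-on-write: return a writable metrics array, cloning the shared
 * defaults on first write. Returns NULL on allocation failure. */
static unsigned int *cow_metrics(struct route_like *rt)
{
        unsigned int *p;

        if (rt->metrics_owned)
                return (unsigned int *)rt->metrics;

        p = malloc(sizeof(default_metrics));
        if (!p)
                return NULL;
        memcpy(p, rt->metrics ? rt->metrics : default_metrics,
               sizeof(default_metrics));
        rt->metrics = p;
        rt->metrics_owned = 1;
        return p;
}
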
index a56276996b72b3f5d43121d6bf93422a58eefe62..ab889bb16b3cb077d26ddd2837b8b2eaaf1de666 100644 (file)
@@ -161,9 +161,13 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
        const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
-       if (uh->check)
+       if (uh->check) {
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
                uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
                                          &ipv6h->daddr, 0);
+       } else {
+               skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+       }
 
        return udp_gro_complete(skb, nhoff);
 }
index 265e190f22187d83de1a9ed07913ef153cf1f03f..c598f74063a19ebd51ea786530c0669d6f92b8c3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_bridge/ebtables.h>
 #include <net/netfilter/nf_tables.h>
 
 static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -40,6 +41,7 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
 union nft_entry {
        struct ipt_entry e4;
        struct ip6t_entry e6;
+       struct ebt_entry ebt;
 };
 
 static inline void
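
The union exists because the xtables ->checkentry hooks expect a family-specific entry header (ipt_entry, ip6t_entry, and now ebt_entry); one union sized to the largest member serves as scratch space for whichever family is being validated. A tiny standalone illustration of the idiom, with made-up stand-in types:

#include <stdio.h>

/* Made-up headers standing in for ipt_entry / ip6t_entry / ebt_entry. */
struct fam4_entry   { unsigned char  proto;    unsigned char invflags; };
struct fam6_entry   { unsigned short proto;    unsigned char invflags; };
struct bridge_entry { unsigned short ethproto; unsigned int  invflags; };

/* One scratch buffer big enough for any family-specific header. */
union scratch_entry {
        struct fam4_entry   e4;
        struct fam6_entry   e6;
        struct bridge_entry ebt;
};

int main(void)
{
        union scratch_entry s = { 0 };

        s.ebt.ethproto = 0x0800;  /* fill only the member this family needs */
        printf("scratch size: %zu bytes\n", sizeof(s));
        return 0;
}
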
@@ -50,9 +52,9 @@ nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
        par->hotdrop    = false;
 }
 
-static void nft_target_eval(const struct nft_expr *expr,
-                           struct nft_data data[NFT_REG_MAX + 1],
-                           const struct nft_pktinfo *pkt)
+static void nft_target_eval_xt(const struct nft_expr *expr,
+                              struct nft_data data[NFT_REG_MAX + 1],
+                              const struct nft_pktinfo *pkt)
 {
        void *info = nft_expr_priv(expr);
        struct xt_target *target = expr->ops->data;
@@ -66,7 +68,7 @@ static void nft_target_eval(const struct nft_expr *expr,
        if (pkt->xt.hotdrop)
                ret = NF_DROP;
 
-       switch(ret) {
+       switch (ret) {
        case XT_CONTINUE:
                data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
                break;
@@ -74,7 +76,41 @@ static void nft_target_eval(const struct nft_expr *expr,
                data[NFT_REG_VERDICT].verdict = ret;
                break;
        }
-       return;
+}
+
+static void nft_target_eval_bridge(const struct nft_expr *expr,
+                                  struct nft_data data[NFT_REG_MAX + 1],
+                                  const struct nft_pktinfo *pkt)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_target *target = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       int ret;
+
+       nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+       ret = target->target(skb, &pkt->xt);
+
+       if (pkt->xt.hotdrop)
+               ret = NF_DROP;
+
+       switch (ret) {
+       case EBT_ACCEPT:
+               data[NFT_REG_VERDICT].verdict = NF_ACCEPT;
+               break;
+       case EBT_DROP:
+               data[NFT_REG_VERDICT].verdict = NF_DROP;
+               break;
+       case EBT_CONTINUE:
+               data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+               break;
+       case EBT_RETURN:
+               data[NFT_REG_VERDICT].verdict = NFT_RETURN;
+               break;
+       default:
+               data[NFT_REG_VERDICT].verdict = ret;
+               break;
+       }
 }
 
 static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
@@ -100,6 +136,10 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                entry->e6.ipv6.proto = proto;
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
+       case NFPROTO_BRIDGE:
+               entry->ebt.ethproto = proto;
+               entry->ebt.invflags = inv ? EBT_IPROTO : 0;
+               break;
        }
        par->entryinfo  = entry;
        par->target     = target;
@@ -307,6 +347,10 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                entry->e6.ipv6.proto = proto;
                entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
                break;
+       case NFPROTO_BRIDGE:
+               entry->ebt.ethproto = proto;
+               entry->ebt.invflags = inv ? EBT_IPROTO : 0;
+               break;
        }
        par->entryinfo  = entry;
        par->match      = match;
@@ -490,6 +534,9 @@ nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
        case AF_INET6:
                fmt = "ip6t_%s";
                break;
+       case NFPROTO_BRIDGE:
+               fmt = "ebt_%s";
+               break;
        default:
                pr_err("nft_compat: unsupported protocol %d\n",
                        nfmsg->nfgen_family);
@@ -663,13 +710,17 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 
        nft_target->ops.type = &nft_target_type;
        nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
-       nft_target->ops.eval = nft_target_eval;
        nft_target->ops.init = nft_target_init;
        nft_target->ops.destroy = nft_target_destroy;
        nft_target->ops.dump = nft_target_dump;
        nft_target->ops.validate = nft_target_validate;
        nft_target->ops.data = target;
 
+       if (family == NFPROTO_BRIDGE)
+               nft_target->ops.eval = nft_target_eval_bridge;
+       else
+               nft_target->ops.eval = nft_target_eval_xt;
+
        list_add(&nft_target->head, &nft_target_list);
 
        return &nft_target->ops;
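
Because ebtables targets return EBT_* verdict codes rather than XT_CONTINUE/NF_* values, the expression gets a different eval callback when the family is NFPROTO_BRIDGE; installing the right function pointer once at ops-selection time keeps the per-packet path branch-free. A small standalone illustration of that dispatch pattern (all names invented):

#include <stdio.h>

enum family { FAM_INET, FAM_BRIDGE };

struct expr_ops {
        void (*eval)(const char *pkt);
};

static void eval_xt(const char *pkt)     { printf("xt verdict for %s\n", pkt); }
static void eval_bridge(const char *pkt) { printf("ebt verdict for %s\n", pkt); }

/* Pick the eval callback once, based on the family, not per packet. */
static void select_ops(struct expr_ops *ops, enum family fam)
{
        ops->eval = (fam == FAM_BRIDGE) ? eval_bridge : eval_xt;
}

int main(void)
{
        struct expr_ops ops;

        select_ops(&ops, FAM_BRIDGE);
        ops.eval("sample packet");
        return 0;
}
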
index 6404a726d17b78fc6db6f411216195e68db63950..9615b8b9fb37dcf769207537f0545dd2a08c62d6 100644 (file)
@@ -39,6 +39,7 @@ static void nft_lookup_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
        [NFTA_LOOKUP_SET]       = { .type = NLA_STRING },
+       [NFTA_LOOKUP_SET_ID]    = { .type = NLA_U32 },
        [NFTA_LOOKUP_SREG]      = { .type = NLA_U32 },
        [NFTA_LOOKUP_DREG]      = { .type = NLA_U32 },
 };
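
This one-line policy addition matters because an attribute without an nla_policy entry is accepted with no type or length checking; declaring NFTA_LOOKUP_SET_ID as NLA_U32 lets the netlink core reject malformed payloads before the expression init code reads them. A generic sketch of policy-driven parsing with hypothetical attribute names:

#include <net/netlink.h>

enum { MYEXPR_UNSPEC, MYEXPR_SET_ID, MYEXPR_SREG, __MYEXPR_MAX };
#define MYEXPR_MAX (__MYEXPR_MAX - 1)

/* Every attribute the code later dereferences should have a policy entry
 * so nla_parse_nested() can validate its type and minimum length. */
static const struct nla_policy myexpr_policy[MYEXPR_MAX + 1] = {
        [MYEXPR_SET_ID] = { .type = NLA_U32 },
        [MYEXPR_SREG]   = { .type = NLA_U32 },
};

static int myexpr_parse(const struct nlattr *nest, u32 *set_id)
{
        struct nlattr *tb[MYEXPR_MAX + 1];
        int err;

        err = nla_parse_nested(tb, MYEXPR_MAX, nest, myexpr_policy);
        if (err < 0)
                return err;
        if (tb[MYEXPR_SET_ID])
                *set_id = nla_get_u32(tb[MYEXPR_SET_ID]);
        return 0;
}
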
index e2c348b8bacafac9c646f4861b55b76d4e1a51e9..50ec42f170a06713e086d66abb2ea0d1aa998d74 100644 (file)
@@ -717,6 +717,8 @@ int ovs_flow_key_extract_userspace(const struct nlattr *attr,
 {
        int err;
 
+       memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
+
        /* Extract metadata from netlink attributes. */
        err = ovs_nla_get_flow_metadata(attr, key, log);
        if (err)
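
The memset closes a hole where flow-key metadata fields not touched by the extraction kept stale stack contents and could later be compared or serialized; zeroing the metadata region first makes every field deterministic. A small standalone illustration of the idiom (struct and names are invented):

#include <string.h>

struct flow_key_like {
        unsigned int in_port;
        unsigned int skb_mark;
        unsigned int recirc_id;
        /* ... more fields, only some of which a given path fills ... */
};

static void extract_metadata(struct flow_key_like *key, unsigned int port)
{
        /* Zero everything first so untouched fields never carry stack
         * garbage into later comparisons or copies to userspace. */
        memset(key, 0, sizeof(*key));
        key->in_port = port;
}
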
index 993281e6278dc829cfbcb43bca054bf1c8adb8ec..216f20b90aa596b49592beee89a996cbe868d8ba 100644 (file)
@@ -1516,7 +1516,7 @@ int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
 /* Called with ovs_mutex or RCU read lock. */
 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
 {
-       return ovs_nla_put_key(&flow->mask->key, &flow->key,
+       return ovs_nla_put_key(&flow->key, &flow->key,
                                OVS_FLOW_ATTR_KEY, false, skb);
 }
 
@@ -1746,7 +1746,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        struct sw_flow_key key;
        struct ovs_tunnel_info *tun_info;
        struct nlattr *a;
-       int err, start, opts_type;
+       int err = 0, start, opts_type;
 
        ovs_match_init(&match, &key, NULL);
        opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
index e5b65acd650b54622b19aa9b879de6b2425a4a66..e6144b8246fd27fe49bffd228b44a44c3e7cbd81 100644 (file)
@@ -221,7 +221,21 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                if (!test_and_set_bit(0, &conn->c_map_queued)) {
                        rds_stats_inc(s_cong_update_queued);
-                       rds_send_xmit(conn);
+                       /* We cannot inline the call to rds_send_xmit() here
+                        * for two reasons (both pertaining to a TCP transport):
+                        * 1. When we get here from the receive path, we
+                        *    are already holding the sock_lock (held by
+                        *    tcp_v4_rcv()). So inlining calls to
+                        *    tcp_setsockopt and/or tcp_sendmsg will deadlock
+                        *    when they try to take the sock_lock().
+                        * 2. Interrupts are masked so that we can mark
+                        *    the port congested from both send and recv paths.
+                        *    (See comment around declaration of rds_cong_lock).
+                        *    An attempt to get the sock_lock() here will
+                        *    therefore trigger warnings.
+                        * Defer the xmit to rds_send_worker() instead.
+                        */
+                       queue_delayed_work(rds_wq, &conn->c_send_w, 0);
                }
        }
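
As the comment explains, instead of calling rds_send_xmit() directly (which can recurse into TCP and try to take a sock_lock that is already held, or attempt work that is illegal with interrupts masked), the congestion update is punted to the connection's send worker through the workqueue. A minimal sketch of deferring work from an atomic path, with hypothetical names:

#include <linux/workqueue.h>

/* Runs later in process context, where taking socket locks and
 * sleeping are allowed. */
static void my_send_worker(struct work_struct *work)
{
        /* ... do the actual transmit here ... */
}

static DECLARE_DELAYED_WORK(my_send_work, my_send_worker);

/* Called from the atomic/receive path: only queue the work, never
 * call into the transport directly. Zero delay means "as soon as
 * the workqueue gets CPU time". */
static void on_congestion_update(void)
{
        queue_delayed_work(system_wq, &my_send_work, 0);
}
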