Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Wed, 15 Mar 2017 18:59:10 +0000 (11:59 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 15 Mar 2017 18:59:10 +0000 (11:59 -0700)
Conflicts:
drivers/net/ethernet/broadcom/genet/bcmgenet.c
net/core/sock.c

Conflicts were overlapping changes in bcmgenet and the
lockdep handling of sockets.

Signed-off-by: David S. Miller <davem@davemloft.net>
26 files changed:
MAINTAINERS
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/octeon_droq.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/tun.c
drivers/net/vxlan.c
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
include/net/sock.h
net/core/sock.c
net/decnet/af_decnet.c
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c
net/mpls/af_mpls.c
net/rds/ib_cm.c
net/sctp/socket.c

diff --combined MAINTAINERS
index a375d855f539bb10d3136c6f3c84694bff949e57,c776906f67a9f6795a0aef555a7fe348d07c506f..cefda30ed704f0a78c1f965de25bcdbdcd57ffc8
@@@ -902,12 -902,6 +902,12 @@@ F:       drivers/net/phy/mdio-xgene.
  F:    Documentation/devicetree/bindings/net/apm-xgene-enet.txt
  F:    Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
  
 +APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
 +M:    Iyappan Subramanian <isubramanian@apm.com>
 +M:    Keyur Chudgar <kchudgar@apm.com>
 +S:    Supported
 +F:    drivers/net/ethernet/apm/xgene-v2/
 +
  APPLIED MICRO (APM) X-GENE SOC PMU
  M:    Tai Nguyen <ttnguyen@apm.com>
  S:    Supported
@@@ -8313,7 -8307,6 +8313,6 @@@ M:      Richard Leitner <richard.leitner@ski
  L:    linux-usb@vger.kernel.org
  S:    Maintained
  F:    drivers/usb/misc/usb251xb.c
- F:    include/linux/platform_data/usb251xb.h
  F:    Documentation/devicetree/bindings/usb/usb251xb.txt
  
  MICROSOFT SURFACE PRO 3 BUTTON DRIVER
@@@ -11068,12 -11061,6 +11067,12 @@@ F: include/linux/dma/dw.
  F:    include/linux/platform_data/dma-dw.h
  F:    drivers/dma/dw/
  
 +SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
 +M:    Jie Deng <jiedeng@synopsys.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ethernet/synopsys/
 +
  SYNOPSYS DESIGNWARE I2C DRIVER
  M:    Jarkko Nikula <jarkko.nikula@linux.intel.com>
  R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --combined drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d848ac58189c31b3a83ff53ec9bde86352985410,69015fa50f2096c77999539179446e5ab38fa759..44f9c0a1f85d8b56fc955f785310230d2e869dac
@@@ -450,6 -450,22 +450,22 @@@ static inline void bcmgenet_rdma_ring_w
                        genet_dma_ring_regs[r]);
  }
  
+ static int bcmgenet_begin(struct net_device *dev)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       /* Turn on the clock */
+       return clk_prepare_enable(priv->clk);
+ }
+ static void bcmgenet_complete(struct net_device *dev)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       /* Turn off the clock */
+       clk_disable_unprepare(priv->clk);
+ }
  static int bcmgenet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
  {
@@@ -605,7 -621,7 +621,7 @@@ static int bcmgenet_set_coalesce(struc
  
        /* GENET TDMA hardware does not support a configurable timeout, but will
         * always generate an interrupt either after MBDONE packets have been
 -       * transmitted, or when the ring is emtpy.
 +       * transmitted, or when the ring is empty.
         */
        if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
            ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@@ -778,8 -794,9 +794,9 @@@ static const struct bcmgenet_stats bcmg
        STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
        /* Misc UniMAC counters */
        STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
-                       UMAC_RBUF_OVFL_CNT),
-       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+                       UMAC_RBUF_OVFL_CNT_V1),
+       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+                       UMAC_RBUF_ERR_CNT_V1),
        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
        STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
        STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@@ -821,6 -838,45 +838,45 @@@ static void bcmgenet_get_strings(struc
        }
  }
  
+ static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+ {
+       u16 new_offset;
+       u32 val;
+       switch (offset) {
+       case UMAC_RBUF_OVFL_CNT_V1:
+               if (GENET_IS_V2(priv))
+                       new_offset = RBUF_OVFL_CNT_V2;
+               else
+                       new_offset = RBUF_OVFL_CNT_V3PLUS;
+               val = bcmgenet_rbuf_readl(priv, new_offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_rbuf_writel(priv, 0, new_offset);
+               break;
+       case UMAC_RBUF_ERR_CNT_V1:
+               if (GENET_IS_V2(priv))
+                       new_offset = RBUF_ERR_CNT_V2;
+               else
+                       new_offset = RBUF_ERR_CNT_V3PLUS;
+               val = bcmgenet_rbuf_readl(priv, new_offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_rbuf_writel(priv, 0, new_offset);
+               break;
+       default:
+               val = bcmgenet_umac_readl(priv, offset);
+               /* clear if overflowed */
+               if (val == ~0)
+                       bcmgenet_umac_writel(priv, 0, offset);
+               break;
+       }
+       return val;
+ }
  static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
  {
        int i, j = 0;
                case BCMGENET_STAT_NETDEV:
                case BCMGENET_STAT_SOFT:
                        continue;
-               case BCMGENET_STAT_MIB_RX:
-               case BCMGENET_STAT_MIB_TX:
                case BCMGENET_STAT_RUNT:
-                       if (s->type != BCMGENET_STAT_MIB_RX)
-                               offset = BCMGENET_STAT_OFFSET;
+                       offset += BCMGENET_STAT_OFFSET;
+                       /* fall through */
+               case BCMGENET_STAT_MIB_TX:
+                       offset += BCMGENET_STAT_OFFSET;
+                       /* fall through */
+               case BCMGENET_STAT_MIB_RX:
                        val = bcmgenet_umac_readl(priv,
                                                  UMAC_MIB_START + j + offset);
+                       offset = 0;     /* Reset Offset */
                        break;
                case BCMGENET_STAT_MISC:
-                       val = bcmgenet_umac_readl(priv, s->reg_offset);
-                       /* clear if overflowed */
-                       if (val == ~0)
-                               bcmgenet_umac_writel(priv, 0, s->reg_offset);
+                       if (GENET_IS_V1(priv)) {
+                               val = bcmgenet_umac_readl(priv, s->reg_offset);
+                               /* clear if overflowed */
+                               if (val == ~0)
+                                       bcmgenet_umac_writel(priv, 0,
+                                                            s->reg_offset);
+                       } else {
+                               val = bcmgenet_update_stat_misc(priv,
+                                                               s->reg_offset);
+                       }
                        break;
                }
  
@@@ -973,6 -1038,8 +1038,8 @@@ static int bcmgenet_set_eee(struct net_
  
  /* standard ethtool support functions. */
  static const struct ethtool_ops bcmgenet_ethtool_ops = {
+       .begin                  = bcmgenet_begin,
+       .complete               = bcmgenet_complete,
        .get_strings            = bcmgenet_get_strings,
        .get_sset_count         = bcmgenet_get_sset_count,
        .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
@@@ -1011,17 -1078,8 +1078,17 @@@ static int bcmgenet_power_down(struct b
                /* Power down LED */
                if (priv->hw_params->flags & GENET_HAS_EXT) {
                        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 -                      reg |= (EXT_PWR_DOWN_PHY |
 -                              EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
 +                      if (GENET_IS_V5(priv))
 +                              reg |= EXT_PWR_DOWN_PHY_EN |
 +                                     EXT_PWR_DOWN_PHY_RD |
 +                                     EXT_PWR_DOWN_PHY_SD |
 +                                     EXT_PWR_DOWN_PHY_RX |
 +                                     EXT_PWR_DOWN_PHY_TX |
 +                                     EXT_IDDQ_GLBL_PWR;
 +                      else
 +                              reg |= EXT_PWR_DOWN_PHY;
 +
 +                      reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
  
                        bcmgenet_phy_power_set(priv->dev, false);
@@@ -1046,34 -1104,12 +1113,34 @@@ static void bcmgenet_power_up(struct bc
  
        switch (mode) {
        case GENET_POWER_PASSIVE:
 -              reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
 -                              EXT_PWR_DOWN_BIAS);
 -              /* fallthrough */
 +              reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
 +              if (GENET_IS_V5(priv)) {
 +                      reg &= ~(EXT_PWR_DOWN_PHY_EN |
 +                               EXT_PWR_DOWN_PHY_RD |
 +                               EXT_PWR_DOWN_PHY_SD |
 +                               EXT_PWR_DOWN_PHY_RX |
 +                               EXT_PWR_DOWN_PHY_TX |
 +                               EXT_IDDQ_GLBL_PWR);
 +                      reg |=   EXT_PHY_RESET;
 +                      bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 +                      mdelay(1);
 +
 +                      reg &=  ~EXT_PHY_RESET;
 +              } else {
 +                      reg &= ~EXT_PWR_DOWN_PHY;
 +                      reg |= EXT_PWR_DN_EN_LD;
 +              }
 +              bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 +              bcmgenet_phy_power_set(priv->dev, true);
 +              bcmgenet_mii_reset(priv->dev);
 +              break;
 +
        case GENET_POWER_CABLE_SENSE:
                /* enable APD */
 -              reg |= EXT_PWR_DN_EN_LD;
 +              if (!GENET_IS_V5(priv)) {
 +                      reg |= EXT_PWR_DN_EN_LD;
 +                      bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 +              }
                break;
        case GENET_POWER_WOL_MAGIC:
                bcmgenet_wol_power_up_cfg(priv, mode);
        default:
                break;
        }
 -
 -      bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
 -      if (mode == GENET_POWER_PASSIVE) {
 -              bcmgenet_phy_power_set(priv->dev, true);
 -              bcmgenet_mii_reset(priv->dev);
 -      }
  }
  
  /* ioctl handle special commands that are not present in ethtool. */
  static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 -      int val = 0;
  
        if (!netif_running(dev))
                return -EINVAL;
  
 -      switch (cmd) {
 -      case SIOCGMIIPHY:
 -      case SIOCGMIIREG:
 -      case SIOCSMIIREG:
 -              if (!priv->phydev)
 -                      val = -ENODEV;
 -              else
 -                      val = phy_mii_ioctl(priv->phydev, rq, cmd);
 -              break;
 -
 -      default:
 -              val = -EINVAL;
 -              break;
 -      }
 +      if (!priv->phydev)
 +              return -ENODEV;
  
 -      return val;
 +      return phy_mii_ioctl(priv->phydev, rq, cmd);
  }
  
  static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@@ -1179,25 -1234,20 +1246,24 @@@ static unsigned int __bcmgenet_tx_recla
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct enet_cb *tx_cb_ptr;
-       struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
        unsigned int bytes_compl = 0;
        unsigned int c_index;
        unsigned int txbds_ready;
        unsigned int txbds_processed = 0;
  
 -      /* Compute how many buffers are transmitted since last xmit call */
 -      c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
 -      c_index &= DMA_C_INDEX_MASK;
 -
 -      if (likely(c_index >= ring->c_index))
 -              txbds_ready = c_index - ring->c_index;
 +      /* Clear status before servicing to reduce spurious interrupts */
 +      if (ring->index == DESC_INDEX)
 +              bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
 +                                       INTRL2_CPU_CLEAR);
        else
 -              txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
 +              bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
 +                                       INTRL2_CPU_CLEAR);
 +
 +      /* Compute how many buffers are transmitted since last xmit call */
 +      c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
 +              & DMA_C_INDEX_MASK;
 +      txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
  
        netif_dbg(priv, tx_done, dev,
                  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
        }
  
        ring->free_bds += txbds_processed;
 -      ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
 +      ring->c_index = c_index;
  
        dev->stats.tx_packets += pkts_compl;
        dev->stats.tx_bytes += bytes_compl;
  
-       txq = netdev_get_tx_queue(dev, ring->queue);
-       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
-               if (netif_tx_queue_stopped(txq))
-                       netif_tx_wake_queue(txq);
-       }
+       netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+                                 pkts_compl, bytes_compl);
  
 -      return pkts_compl;
 +      return txbds_processed;
  }
  
  static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@@ -1264,8 -1309,16 +1325,16 @@@ static int bcmgenet_tx_poll(struct napi
        struct bcmgenet_tx_ring *ring =
                container_of(napi, struct bcmgenet_tx_ring, napi);
        unsigned int work_done = 0;
+       struct netdev_queue *txq;
+       unsigned long flags;
  
-       work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+       spin_lock_irqsave(&ring->lock, flags);
+       work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+               txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+               netif_tx_wake_queue(txq);
+       }
+       spin_unlock_irqrestore(&ring->lock, flags);
  
        if (work_done == 0) {
                napi_complete(napi);
@@@ -1604,21 -1657,10 +1673,21 @@@ static unsigned int bcmgenet_desc_rx(st
        unsigned long dma_flag;
        int len;
        unsigned int rxpktprocessed = 0, rxpkttoprocess;
 -      unsigned int p_index;
 +      unsigned int p_index, mask;
        unsigned int discards;
        unsigned int chksum_ok = 0;
  
 +      /* Clear status before servicing to reduce spurious interrupts */
 +      if (ring->index == DESC_INDEX) {
 +              bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
 +                                       INTRL2_CPU_CLEAR);
 +      } else {
 +              mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
 +              bcmgenet_intrl2_1_writel(priv,
 +                                       mask,
 +                                       INTRL2_CPU_CLEAR);
 +      }
 +
        p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
  
        discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
        }
  
        p_index &= DMA_P_INDEX_MASK;
 -
 -      if (likely(p_index >= ring->c_index))
 -              rxpkttoprocess = p_index - ring->c_index;
 -      else
 -              rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
 -                               p_index;
 +      rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
  
        netif_dbg(priv, rx_status, dev,
                  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@@ -1865,8 -1912,10 +1934,8 @@@ static void bcmgenet_intr_disable(struc
        /* Mask all interrupts.*/
        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
 -      bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
 -      bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
  }
  
  static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@@ -1893,6 -1942,8 +1962,6 @@@ static int init_umac(struct bcmgenet_pr
        int ret;
        u32 reg;
        u32 int0_enable = 0;
 -      u32 int1_enable = 0;
 -      int i;
  
        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
  
  
        bcmgenet_intr_disable(priv);
  
 -      /* Enable Rx default queue 16 interrupts */
 -      int0_enable |= UMAC_IRQ_RXDMA_DONE;
 -
 -      /* Enable Tx default queue 16 interrupts */
 -      int0_enable |= UMAC_IRQ_TXDMA_DONE;
 -
        /* Configure backpressure vectors for MoCA */
        if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
        if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
                int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
  
 -      /* Enable Rx priority queue interrupts */
 -      for (i = 0; i < priv->hw_params->rx_queues; ++i)
 -              int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
 -
 -      /* Enable Tx priority queue interrupts */
 -      for (i = 0; i < priv->hw_params->tx_queues; ++i)
 -              int1_enable |= (1 << i);
 -
        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 -      bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
  
 -      /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
  
        return 0;
@@@ -2069,33 -2136,22 +2138,33 @@@ static void bcmgenet_init_tx_napi(struc
  static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
  {
        unsigned int i;
 +      u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
 +      u32 int1_enable = 0;
        struct bcmgenet_tx_ring *ring;
  
        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
                ring = &priv->tx_rings[i];
                napi_enable(&ring->napi);
 +              int1_enable |= (1 << i);
        }
  
        ring = &priv->tx_rings[DESC_INDEX];
        napi_enable(&ring->napi);
 +
 +      bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 +      bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
  }
  
  static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
  {
        unsigned int i;
 +      u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
 +      u32 int1_disable = 0xffff;
        struct bcmgenet_tx_ring *ring;
  
 +      bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
 +      bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
 +
        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
                ring = &priv->tx_rings[i];
                napi_disable(&ring->napi);
@@@ -2208,33 -2264,22 +2277,33 @@@ static void bcmgenet_init_rx_napi(struc
  static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
  {
        unsigned int i;
 +      u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
 +      u32 int1_enable = 0;
        struct bcmgenet_rx_ring *ring;
  
        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_enable(&ring->napi);
 +              int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
        }
  
        ring = &priv->rx_rings[DESC_INDEX];
        napi_enable(&ring->napi);
 +
 +      bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 +      bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
  }
  
  static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
  {
        unsigned int i;
 +      u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
 +      u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
        struct bcmgenet_rx_ring *ring;
  
 +      bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
 +      bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
 +
        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
                ring = &priv->rx_rings[i];
                napi_disable(&ring->napi);
@@@ -2481,17 -2526,28 +2550,28 @@@ static int bcmgenet_init_dma(struct bcm
  /* Interrupt bottom half */
  static void bcmgenet_irq_task(struct work_struct *work)
  {
+       unsigned long flags;
+       unsigned int status;
        struct bcmgenet_priv *priv = container_of(
                        work, struct bcmgenet_priv, bcmgenet_irq_work);
  
        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
  
+       spin_lock_irqsave(&priv->lock, flags);
+       status = priv->irq0_stat;
+       priv->irq0_stat = 0;
+       spin_unlock_irqrestore(&priv->lock, flags);
+       if (status & UMAC_IRQ_MPD_R) {
+               netif_dbg(priv, wol, priv->dev,
+                         "magic packet detected, waking up\n");
+               bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+       }
        /* Link UP/DOWN event */
-       if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+       if (status & UMAC_IRQ_LINK_EVENT)
                phy_mac_interrupt(priv->phydev,
-                                 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
-               priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
-       }
+                                 !!(status & UMAC_IRQ_LINK_UP));
  }
  
  /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@@ -2500,22 -2556,21 +2580,21 @@@ static irqreturn_t bcmgenet_isr1(int ir
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
-       unsigned int index;
+       unsigned int index, status;
  
-       /* Save irq status for bottom-half processing. */
-       priv->irq1_stat =
-               bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+       /* Read irq status */
+       status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
  
        /* clear interrupts */
-       bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
  
        netif_dbg(priv, intr, priv->dev,
-                 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+                 "%s: IRQ=0x%x\n", __func__, status);
  
        /* Check Rx priority queue interrupts */
        for (index = 0; index < priv->hw_params->rx_queues; index++) {
-               if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+               if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
                        continue;
  
                rx_ring = &priv->rx_rings[index];
  
        /* Check Tx priority queue interrupts */
        for (index = 0; index < priv->hw_params->tx_queues; index++) {
-               if (!(priv->irq1_stat & BIT(index)))
+               if (!(status & BIT(index)))
                        continue;
  
                tx_ring = &priv->tx_rings[index];
@@@ -2548,19 -2603,20 +2627,20 @@@ static irqreturn_t bcmgenet_isr0(int ir
        struct bcmgenet_priv *priv = dev_id;
        struct bcmgenet_rx_ring *rx_ring;
        struct bcmgenet_tx_ring *tx_ring;
+       unsigned int status;
+       unsigned long flags;
  
-       /* Save irq status for bottom-half processing. */
-       priv->irq0_stat =
-               bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+       /* Read irq status */
+       status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
  
        /* clear interrupts */
-       bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
  
        netif_dbg(priv, intr, priv->dev,
-                 "IRQ=0x%x\n", priv->irq0_stat);
+                 "IRQ=0x%x\n", status);
  
-       if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+       if (status & UMAC_IRQ_RXDMA_DONE) {
                rx_ring = &priv->rx_rings[DESC_INDEX];
  
                if (likely(napi_schedule_prep(&rx_ring->napi))) {
                }
        }
  
-       if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+       if (status & UMAC_IRQ_TXDMA_DONE) {
                tx_ring = &priv->tx_rings[DESC_INDEX];
  
                if (likely(napi_schedule_prep(&tx_ring->napi))) {
                }
        }
  
 +      if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 +                              UMAC_IRQ_PHY_DET_F |
 +                              UMAC_IRQ_LINK_EVENT |
 +                              UMAC_IRQ_HFB_SM |
 +                              UMAC_IRQ_HFB_MM)) {
 +              /* all other interested interrupts handled in bottom half */
 +              schedule_work(&priv->bcmgenet_irq_work);
 +      }
 +
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-           priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
-               priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+               status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
                wake_up(&priv->wq);
        }
  
+       /* all other interested interrupts handled in bottom half */
+       status &= (UMAC_IRQ_LINK_EVENT |
+                  UMAC_IRQ_MPD_R);
+       if (status) {
+               /* Save irq status for bottom-half processing. */
+               spin_lock_irqsave(&priv->lock, flags);
+               priv->irq0_stat |= status;
+               spin_unlock_irqrestore(&priv->lock, flags);
+               schedule_work(&priv->bcmgenet_irq_work);
+       }
        return IRQ_HANDLED;
  }
  
@@@ -2817,6 -2875,8 +2908,8 @@@ err_irq0
  err_fini_dma:
        bcmgenet_fini_dma(priv);
  err_clk_disable:
+       if (priv->internal_phy)
+               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
  }
@@@ -2861,7 -2921,7 +2954,7 @@@ static int bcmgenet_close(struct net_de
        if (ret)
                return ret;
  
 -      /* Disable MAC transmit. TX DMA disabled have to done before this */
 +      /* Disable MAC transmit. TX DMA disabled must be done before this */
        umac_enable_set(priv, CMD_TX_EN, false);
  
        /* tx reclaim */
@@@ -3126,25 -3186,6 +3219,25 @@@ static struct bcmgenet_hw_params bcmgen
                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
        },
 +      [GENET_V5] = {
 +              .tx_queues = 4,
 +              .tx_bds_per_q = 32,
 +              .rx_queues = 0,
 +              .rx_bds_per_q = 0,
 +              .bp_in_en_shift = 17,
 +              .bp_in_mask = 0x1ffff,
 +              .hfb_filter_cnt = 48,
 +              .hfb_filter_size = 128,
 +              .qtag_mask = 0x3F,
 +              .tbuf_offset = 0x0600,
 +              .hfb_offset = 0x8000,
 +              .hfb_reg_offset = 0xfc00,
 +              .rdma_offset = 0x2000,
 +              .tdma_offset = 0x4000,
 +              .words_per_bd = 3,
 +              .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
 +                       GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
 +      },
  };
  
  /* Infer hardware parameters from the detected GENET version */
@@@ -3155,22 -3196,26 +3248,22 @@@ static void bcmgenet_set_hw_params(stru
        u8 major;
        u16 gphy_rev;
  
 -      if (GENET_IS_V4(priv)) {
 +      if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v4;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
 -              priv->version = GENET_V4;
        } else if (GENET_IS_V3(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
 -              priv->version = GENET_V3;
        } else if (GENET_IS_V2(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
 -              priv->version = GENET_V2;
        } else if (GENET_IS_V1(priv)) {
                bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
                genet_dma_ring_regs = genet_dma_ring_regs_v123;
                priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
 -              priv->version = GENET_V1;
        }
  
        /* enum genet_version starts at 1 */
        /* Read GENET HW version */
        reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
        major = (reg >> 24 & 0x0f);
 -      if (major == 5)
 +      if (major == 6)
 +              major = 5;
 +      else if (major == 5)
                major = 4;
        else if (major == 0)
                major = 1;
         */
        gphy_rev = reg & 0xffff;
  
 -      if (gphy_rev == 0 || gphy_rev == 0x01ff) {
 +      if (GENET_IS_V5(priv)) {
 +              /* The EPHY revision should come from the MDIO registers of
 +               * the PHY not from GENET.
 +               */
 +              if (gphy_rev != 0) {
 +                      pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
 +                              gphy_rev);
 +              }
+       /* This is reserved so should require special treatment */
 -      }
 -
++      } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+               pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+               return;
        /* This is the good old scheme, just GPHY major, no minor nor patch */
 -      if ((gphy_rev & 0xf0) != 0)
 +      } else if ((gphy_rev & 0xf0) != 0) {
                priv->gphy_rev = gphy_rev << 8;
 -
        /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
 -      else if ((gphy_rev & 0xff00) != 0)
 +      } else if ((gphy_rev & 0xff00) != 0) {
                priv->gphy_rev = gphy_rev;
-       /* This is reserved so should require special treatment */
-       } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
-               pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
-               return;
 +      }
  
  #ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (!(params->flags & GENET_HAS_40BITS))
@@@ -3258,7 -3295,6 +3351,7 @@@ static const struct of_device_id bcmgen
        { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
        { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
        { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
 +      { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
        { },
  };
  MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@@ -3273,6 -3309,7 +3366,7 @@@ static int bcmgenet_probe(struct platfo
        const void *macaddr;
        struct resource *r;
        int err = -EIO;
+       const char *phy_mode_str;
  
        /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
        dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
                goto err;
        }
  
+       spin_lock_init(&priv->lock);
        SET_NETDEV_DEV(dev, &pdev->dev);
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
                priv->clk_eee = NULL;
        }
  
+       /* If this is an internal GPHY, power it on now, before UniMAC is
+        * brought out of reset as absolutely no UniMAC activity is allowed
+        */
+       if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+           !strcasecmp(phy_mode_str, "internal"))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
        err = reset_umac(priv);
        if (err)
                goto err_clk_disable;
@@@ -3446,7 -3492,7 +3549,7 @@@ static int bcmgenet_suspend(struct devi
        if (ret)
                return ret;
  
 -      /* Disable MAC transmit. TX DMA disabled have to done before this */
 +      /* Disable MAC transmit. TX DMA disabled must be done before this */
        umac_enable_set(priv, CMD_TX_EN, false);
  
        /* tx reclaim */
@@@ -3542,6 -3588,8 +3645,8 @@@ static int bcmgenet_resume(struct devic
        return 0;
  
  out_clk_disable:
+       if (priv->internal_phy)
+               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        clk_disable_unprepare(priv->clk);
        return ret;
  }
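
The reclaim rewrite above drops the explicit wraparound branch: the hardware consumer index is a free-running counter modulo DMA_C_INDEX_MASK + 1, so a masked unsigned subtraction yields the completed-buffer count in every case, wrapped or not. A minimal userspace sketch of that arithmetic (the 16-bit mask value is assumed here for illustration; the driver takes it from its register definitions):

#include <assert.h>
#include <stdio.h>

#define DMA_C_INDEX_MASK 0xffff /* assumed 16-bit hardware index */

/* Buffers completed since the last reclaim; correct across wraparound,
 * mirroring: txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK; */
static unsigned int txbds_ready(unsigned int c_index, unsigned int old_c_index)
{
        return (c_index - old_c_index) & DMA_C_INDEX_MASK;
}

int main(void)
{
        assert(txbds_ready(0x0010, 0x000c) == 4); /* no wrap */
        assert(txbds_ready(0x0003, 0xfffe) == 5); /* wrapped past 0xffff */
        printf("masked ring-index arithmetic OK\n");
        return 0;
}

The same masked difference is reused on the receive side for rxpkttoprocess, which is what lets both paths delete their if/else wraparound blocks.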
diff --combined drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1001d9131ba8a0bea9cd1baf255f732e3a1a12c1,db7f289d65ae2abd1589446ee0cadc00ffbf0254..5692c05824344b34da81f1041fe1f085f3588c50
@@@ -214,7 -214,9 +214,9 @@@ struct bcmgenet_mib_counters 
  #define  MDIO_REG_SHIFT                       16
  #define  MDIO_REG_MASK                        0x1F
  
- #define UMAC_RBUF_OVFL_CNT            0x61C
+ #define UMAC_RBUF_OVFL_CNT_V1         0x61C
+ #define RBUF_OVFL_CNT_V2              0x80
+ #define RBUF_OVFL_CNT_V3PLUS          0x94
  
  #define UMAC_MPD_CTRL                 0x620
  #define  MPD_EN                               (1 << 0)
  
  #define UMAC_MPD_PW_MS                        0x624
  #define UMAC_MPD_PW_LS                        0x628
- #define UMAC_RBUF_ERR_CNT             0x634
+ #define UMAC_RBUF_ERR_CNT_V1          0x634
+ #define RBUF_ERR_CNT_V2                       0x84
+ #define RBUF_ERR_CNT_V3PLUS           0x98
  #define UMAC_MDF_ERR_CNT              0x638
  #define UMAC_MDF_CTRL                 0x650
  #define UMAC_MDF_ADDR                 0x654
  #define  EXT_PWR_DN_EN_LD             (1 << 3)
  #define  EXT_ENERGY_DET                       (1 << 4)
  #define  EXT_IDDQ_FROM_PHY            (1 << 5)
 +#define  EXT_IDDQ_GLBL_PWR            (1 << 7)
  #define  EXT_PHY_RESET                        (1 << 8)
  #define  EXT_ENERGY_DET_MASK          (1 << 12)
 +#define  EXT_PWR_DOWN_PHY_TX          (1 << 16)
 +#define  EXT_PWR_DOWN_PHY_RX          (1 << 17)
 +#define  EXT_PWR_DOWN_PHY_SD          (1 << 18)
 +#define  EXT_PWR_DOWN_PHY_RD          (1 << 19)
 +#define  EXT_PWR_DOWN_PHY_EN          (1 << 20)
  
  #define EXT_RGMII_OOB_CTRL            0x0C
  #define  RGMII_LINK                   (1 << 4)
@@@ -501,15 -499,13 +505,15 @@@ enum bcmgenet_version 
        GENET_V1 = 1,
        GENET_V2,
        GENET_V3,
 -      GENET_V4
 +      GENET_V4,
 +      GENET_V5
  };
  
  #define GENET_IS_V1(p)        ((p)->version == GENET_V1)
  #define GENET_IS_V2(p)        ((p)->version == GENET_V2)
  #define GENET_IS_V3(p)        ((p)->version == GENET_V3)
  #define GENET_IS_V4(p)        ((p)->version == GENET_V4)
 +#define GENET_IS_V5(p)        ((p)->version == GENET_V5)
  
  /* Hardware flags */
  #define GENET_HAS_40BITS      (1 << 0)
@@@ -627,11 -623,13 +631,13 @@@ struct bcmgenet_priv 
        struct work_struct bcmgenet_irq_work;
        int irq0;
        int irq1;
-       unsigned int irq0_stat;
-       unsigned int irq1_stat;
        int wol_irq;
        bool wol_irq_disabled;
  
+       /* shared status */
+       spinlock_t lock;
+       unsigned int irq0_stat;
        /* HW descriptors/checksum variables */
        bool desc_64b_en;
        bool desc_rxchk_en;
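
The header change above moves irq0_stat behind a new spinlock (priv->lock) and drops irq1_stat entirely: the top half ORs freshly read status bits into the shared word, and the bcmgenet_irq_task work item takes and clears it under the same lock, as seen earlier in this diff. A condensed userspace analogue of that handoff, with a pthread mutex standing in for the spinlock and illustrative bit names:

#include <pthread.h>
#include <stdio.h>

#define IRQ_LINK_EVENT (1u << 0) /* illustrative bit assignments */
#define IRQ_MPD_R      (1u << 1)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int irq0_stat;

/* Top half: accumulate status bits for bottom-half processing. */
static void isr_save(unsigned int status)
{
        pthread_mutex_lock(&lock);
        irq0_stat |= status;
        pthread_mutex_unlock(&lock);
}

/* Bottom half: consume and clear the accumulated status atomically. */
static unsigned int work_consume(void)
{
        unsigned int status;

        pthread_mutex_lock(&lock);
        status = irq0_stat;
        irq0_stat = 0;
        pthread_mutex_unlock(&lock);
        return status;
}

int main(void)
{
        isr_save(IRQ_LINK_EVENT);
        isr_save(IRQ_MPD_R);
        printf("consumed 0x%x, remaining 0x%x\n", work_consume(), irq0_stat);
        return 0;
}

Because the ISR only ever ORs bits in and the worker swaps the word for zero, no event is lost even if another interrupt fires between the two steps.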
diff --combined drivers/net/ethernet/cavium/liquidio/lio_main.c
index acfd848d534488e7213933c79fca3e559c750522,92f46b1375c32527b29e24a4476d6b455835bd46..ca529a78bca7ff9e4aa8e5b3b43802a3c7a09384
@@@ -152,7 -152,7 +152,7 @@@ struct octnic_gather 
         */
        struct octeon_sg_entry *sg;
  
-       u64 sg_dma_ptr;
+       dma_addr_t sg_dma_ptr;
  };
  
  struct handshake {
@@@ -734,6 -734,9 +734,9 @@@ static void delete_glists(struct lio *l
        struct octnic_gather *g;
        int i;
  
+       kfree(lio->glist_lock);
+       lio->glist_lock = NULL;
        if (!lio->glist)
                return;
  
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
-                       if (g) {
-                               if (g->sg) {
-                                       dma_unmap_single(&lio->oct_dev->
-                                                        pci_dev->dev,
-                                                        g->sg_dma_ptr,
-                                                        g->sg_size,
-                                                        DMA_TO_DEVICE);
-                                       kfree((void *)((unsigned long)g->sg -
-                                                      g->adjust));
-                               }
+                       if (g)
                                kfree(g);
-                       }
                } while (g);
+               if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+                       lio_dma_free(lio->oct_dev,
+                                    lio->glist_entry_size * lio->tx_qsize,
+                                    lio->glists_virt_base[i],
+                                    lio->glists_dma_base[i]);
+               }
        }
  
-       kfree((void *)lio->glist);
-       kfree((void *)lio->glist_lock);
+       kfree(lio->glists_virt_base);
+       lio->glists_virt_base = NULL;
+       kfree(lio->glists_dma_base);
+       lio->glists_dma_base = NULL;
+       kfree(lio->glist);
+       lio->glist = NULL;
  }
  
  /**
@@@ -772,22 -778,49 +778,49 @@@ static int setup_glists(struct octeon_d
        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
-               return 1;
+               return -ENOMEM;
  
        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
-               kfree((void *)lio->glist_lock);
-               return 1;
+               kfree(lio->glist_lock);
+               lio->glist_lock = NULL;
+               return -ENOMEM;
+       }
+       lio->glist_entry_size =
+               ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+       /* allocate memory to store virtual and dma base address of
+        * per glist consistent memory
+        */
+       lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+                                       GFP_KERNEL);
+       lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+                                      GFP_KERNEL);
+       if (!lio->glists_virt_base || !lio->glists_dma_base) {
+               delete_glists(lio);
+               return -ENOMEM;
        }
  
        for (i = 0; i < num_iqs; i++) {
 -              int numa_node = cpu_to_node(i % num_online_cpus());
 +              int numa_node = dev_to_node(&oct->pci_dev->dev);
  
                spin_lock_init(&lio->glist_lock[i]);
  
                INIT_LIST_HEAD(&lio->glist[i]);
  
+               lio->glists_virt_base[i] =
+                       lio_dma_alloc(oct,
+                                     lio->glist_entry_size * lio->tx_qsize,
+                                     &lio->glists_dma_base[i]);
+               if (!lio->glists_virt_base[i]) {
+                       delete_glists(lio);
+                       return -ENOMEM;
+               }
                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                break;
  
-                       g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
-                                     OCT_SG_ENTRY_SIZE);
+                       g->sg = lio->glists_virt_base[i] +
+                               (j * lio->glist_entry_size);
  
-                       g->sg = kmalloc_node(g->sg_size + 8,
-                                            GFP_KERNEL, numa_node);
-                       if (!g->sg)
-                               g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
-                       if (!g->sg) {
-                               kfree(g);
-                               break;
-                       }
-                       /* The gather component should be aligned on 64-bit
-                        * boundary
-                        */
-                       if (((unsigned long)g->sg) & 7) {
-                               g->adjust = 8 - (((unsigned long)g->sg) & 7);
-                               g->sg = (struct octeon_sg_entry *)
-                                       ((unsigned long)g->sg + g->adjust);
-                       }
-                       g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
-                                                      g->sg, g->sg_size,
-                                                      DMA_TO_DEVICE);
-                       if (dma_mapping_error(&oct->pci_dev->dev,
-                                             g->sg_dma_ptr)) {
-                               kfree((void *)((unsigned long)g->sg -
-                                              g->adjust));
-                               kfree(g);
-                               break;
-                       }
+                       g->sg_dma_ptr = lio->glists_dma_base[i] +
+                                       (j * lio->glist_entry_size);
  
                        list_add_tail(&g->list, &lio->glist[i]);
                }
  
                if (j != lio->tx_qsize) {
                        delete_glists(lio);
-                       return 1;
+                       return -ENOMEM;
                }
        }
  
@@@ -1885,9 -1893,6 +1893,6 @@@ static void free_netsgbuf(void *buf
                i++;
        }
  
-       dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-                               g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
        iq = skb_iq(lio, skb);
        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
@@@ -1933,9 -1938,6 +1938,6 @@@ static void free_netsgbuf_with_resp(voi
                i++;
        }
  
-       dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
-                               g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
        iq = skb_iq(lio, skb);
  
        spin_lock(&lio->glist_lock[iq]);
@@@ -2553,15 -2555,6 +2555,15 @@@ static inline int setup_io_queues(struc
                                __func__);
                        return 1;
                }
 +
 +              if (octeon_dev->ioq_vector) {
 +                      struct octeon_ioq_vector *ioq_vector;
 +
 +                      ioq_vector = &octeon_dev->ioq_vector[q];
 +                      netif_set_xps_queue(netdev,
 +                                          &ioq_vector->affinity_mask,
 +                                          ioq_vector->iq_index);
 +              }
        }
  
        return 0;
@@@ -3282,8 -3275,6 +3284,6 @@@ static int liquidio_xmit(struct sk_buf
                        i++;
                }
  
-               dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
-                                          g->sg_size, DMA_TO_DEVICE);
                dptr = g->sg_dma_ptr;
  
                if (OCTEON_CN23XX_PF(oct))
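
The liquidio rework above trades one kmalloc plus dma_map_single per gather entry for a single per-queue coherent allocation that is carved into fixed-size slots; each entry's virtual and DMA addresses are the two base addresses plus the same offset, so no per-entry mapping, alignment fixup, or sync is needed. A userspace sketch of just the carving arithmetic (the sizes and the fake bus address are illustrative; in the driver the (virt, dma) pair comes from lio_dma_alloc):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRY_SIZE  64 /* illustrative; the driver rounds entries up to 8 bytes */
#define NUM_ENTRIES 4

int main(void)
{
        /* One backing block per queue, standing in for the coherent region. */
        uint8_t *virt_base = malloc((size_t)ENTRY_SIZE * NUM_ENTRIES);
        uint64_t dma_base = 0x10000000; /* pretend bus address */
        int j;

        if (!virt_base)
                return 1;

        for (j = 0; j < NUM_ENTRIES; j++) {
                void *sg = virt_base + (size_t)j * ENTRY_SIZE;
                uint64_t sg_dma = dma_base + (uint64_t)j * ENTRY_SIZE;

                printf("entry %d: virt=%p dma=0x%llx\n",
                       j, sg, (unsigned long long)sg_dma);
        }
        free(virt_base);
        return 0;
}

This is also why the dma_sync_single_for_cpu/for_device calls disappear from the transmit paths later in this diff: coherent memory does not need them.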
diff --combined drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index a91835da1accf64665e33924dd6da47e8d4a4f0f,79f809479af6e7d865cc7c280c84232622af982e..00970597ada834f0969fdd845d81df588f3a8dfb
@@@ -155,11 -155,6 +155,6 @@@ octeon_droq_destroy_ring_buffers(struc
                        recv_buffer_destroy(droq->recv_buf_list[i].buffer,
                                            pg_info);
  
-               if (droq->desc_ring && droq->desc_ring[i].info_ptr)
-                       lio_unmap_ring_info(oct->pci_dev,
-                                           (u64)droq->
-                                           desc_ring[i].info_ptr,
-                                           OCT_DROQ_INFO_SIZE);
                droq->recv_buf_list[i].buffer = NULL;
        }
  
@@@ -211,10 -206,7 +206,7 @@@ int octeon_delete_droq(struct octeon_de
        vfree(droq->recv_buf_list);
  
        if (droq->info_base_addr)
-               cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
-                                      droq->info_alloc_size,
-                                      droq->info_base_addr,
-                                      droq->info_list_dma);
+               lio_free_info_buffer(oct, droq);
  
        if (droq->desc_ring)
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@@ -234,7 -226,8 +226,7 @@@ int octeon_init_droq(struct octeon_devi
        struct octeon_droq *droq;
        u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
        u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
 -      int orig_node = dev_to_node(&oct->pci_dev->dev);
 -      int numa_node = cpu_to_node(q_no % num_online_cpus());
 +      int numa_node = dev_to_node(&oct->pci_dev->dev);
  
        dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
  
        droq->buffer_size = c_buf_size;
  
        desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
 -      set_dev_node(&oct->pci_dev->dev, numa_node);
        droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
                                        (dma_addr_t *)&droq->desc_ring_dma);
 -      set_dev_node(&oct->pci_dev->dev, orig_node);
 -      if (!droq->desc_ring)
 -              droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
 -                                      (dma_addr_t *)&droq->desc_ring_dma);
  
        if (!droq->desc_ring) {
                dev_err(&oct->pci_dev->dev,
        dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
                droq->max_count);
  
-       droq->info_list =
-               cnnic_numa_alloc_aligned_dma((droq->max_count *
-                                             OCT_DROQ_INFO_SIZE),
-                                            &droq->info_alloc_size,
-                                            &droq->info_base_addr,
-                                            numa_node);
+       droq->info_list = lio_alloc_info_buffer(oct, droq);
        if (!droq->info_list) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
                lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@@ -977,7 -970,7 +964,7 @@@ int octeon_create_droq(struct octeon_de
                       u32 desc_size, void *app_ctx)
  {
        struct octeon_droq *droq;
 -      int numa_node = cpu_to_node(q_no % num_online_cpus());
 +      int numa_node = dev_to_node(&oct->pci_dev->dev);
  
        if (oct->droq[q_no]) {
                dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
diff --combined drivers/net/ethernet/mellanox/mlxsw/reg.h
index 393743c08650fecedb5ea5b88800874b5da48902,d9616daf8a705645eb5b14e5af889e0b5020ab0d..e7a652c43b5c8d5ed1e1758681cb51d914810869
@@@ -769,7 -769,7 +769,7 @@@ static inline void mlxsw_reg_spvid_pack
  #define MLXSW_REG_SPVM_ID 0x200F
  #define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
  #define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
- #define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+ #define MLXSW_REG_SPVM_REC_MAX_COUNT 255
  #define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
                    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
  
@@@ -1702,7 -1702,7 +1702,7 @@@ static inline void mlxsw_reg_sfmr_pack(
  #define MLXSW_REG_SPVMLR_ID 0x2020
  #define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
  #define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
- #define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+ #define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
  #define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
                              MLXSW_REG_SPVMLR_REC_LEN * \
                              MLXSW_REG_SPVMLR_REC_MAX_COUNT)
@@@ -4141,8 -4141,7 +4141,8 @@@ static inline void mlxsw_reg_ritr_sp_if
  
  static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
                                       enum mlxsw_reg_ritr_if_type type,
 -                                     u16 rif, u16 mtu, const char *mac)
 +                                     u16 rif, u16 vr_id, u16 mtu,
 +                                     const char *mac)
  {
        bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
  
        mlxsw_reg_ritr_rif_set(payload, rif);
        mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
        mlxsw_reg_ritr_lb_en_set(payload, 1);
 +      mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
        mlxsw_reg_ritr_mtu_set(payload, mtu);
        mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
  }
@@@ -5506,70 -5504,6 +5506,70 @@@ static inline void mlxsw_reg_mpsc_pack(
        mlxsw_reg_mpsc_rate_set(payload, rate);
  }
  
 +/* MGPC - Monitoring General Purpose Counter Set Register
 + * The MGPC register retrieves and sets the General Purpose Counter Set.
 + */
 +#define MLXSW_REG_MGPC_ID 0x9081
 +#define MLXSW_REG_MGPC_LEN 0x18
 +
 +MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
 +
 +enum mlxsw_reg_mgpc_counter_set_type {
 +      /* No count */
 +      MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUT = 0x00,
 +      /* Count packets and bytes */
 +      MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
 +      /* Count only packets */
 +      MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
 +};
 +
 +/* reg_mgpc_counter_set_type
 + * Counter set type.
 + * Access: OP
 + */
 +MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
 +
 +/* reg_mgpc_counter_index
 + * Counter index.
 + * Access: Index
 + */
 +MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
 +
 +enum mlxsw_reg_mgpc_opcode {
 +      /* Nop */
 +      MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
 +      /* Clear counters */
 +      MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
 +};
 +
 +/* reg_mgpc_opcode
 + * Opcode.
 + * Access: OP
 + */
 +MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
 +
 +/* reg_mgpc_byte_counter
 + * Byte counter value.
 + * Access: RW
 + */
 +MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
 +
 +/* reg_mgpc_packet_counter
 + * Packet counter value.
 + * Access: RW
 + */
 +MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
 +
 +static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
 +                                     enum mlxsw_reg_mgpc_opcode opcode,
 +                                     enum mlxsw_reg_mgpc_counter_set_type set_type)
 +{
 +      MLXSW_REG_ZERO(mgpc, payload);
 +      mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
 +      mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
 +      mlxsw_reg_mgpc_opcode_set(payload, opcode);
 +}
 +
  /* SBPR - Shared Buffer Pools Register
   * -----------------------------------
   * The SBPR configures and retrieves the shared buffer pools and configuration.
@@@ -6043,7 -5977,6 +6043,7 @@@ static const struct mlxsw_reg_info *mlx
        MLXSW_REG(mpar),
        MLXSW_REG(mlcr),
        MLXSW_REG(mpsc),
 +      MLXSW_REG(mgpc),
        MLXSW_REG(sbpr),
        MLXSW_REG(sbcm),
        MLXSW_REG(sbpm),
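
Both record-count constants above drop from 256 to 255. The diff itself does not state the reason; one consistent reading is that the device caps the record count at 255 (for example, an 8-bit count field), and the resulting payload arithmetic is easy to check:

#include <stdio.h>

#define MLXSW_REG_SPVM_BASE_LEN      0x04
#define MLXSW_REG_SPVM_REC_LEN       0x04
#define MLXSW_REG_SPVM_REC_MAX_COUNT 255 /* was 256 before this merge */

int main(void)
{
        /* 4 + 4 * 255 = 1024 bytes; 256 records would need 1028. */
        printf("SPVM max payload: %u bytes\n",
               MLXSW_REG_SPVM_BASE_LEN +
               MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT);
        return 0;
}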
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 28bc989371c62e426b3ca514020f60585470200e,ae6cccc666e4619bbb1a8360b5cc090d303da60b..e724c6266247a0818cb4ac387d0730428953577e
@@@ -39,7 -39,6 +39,7 @@@
  #include <net/pkt_cls.h>
  #include <net/tc_act/tc_gact.h>
  #include <net/tc_act/tc_mirred.h>
 +#include <net/tc_act/tc_vlan.h>
  
  #include "spectrum.h"
  #include "core_acl_flex_keys.h"
@@@ -56,11 -55,6 +56,11 @@@ static int mlxsw_sp_flower_parse_action
        if (tc_no_actions(exts))
                return 0;
  
 +      /* Count action is inserted first */
 +      err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
 +      if (err)
 +              return err;
 +
        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                                                         out_dev);
                        if (err)
                                return err;
 +              } else if (is_tcf_vlan(a)) {
 +                      u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
 +                      u32 action = tcf_vlan_action(a);
 +                      u8 prio = tcf_vlan_push_prio(a);
 +                      u16 vid = tcf_vlan_push_vid(a);
 +
 +                      return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
 +                                                         action, vid,
 +                                                         proto, prio);
                } else {
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
                        return -EOPNOTSUPP;
@@@ -188,8 -173,7 +188,8 @@@ static int mlxsw_sp_flower_parse(struc
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
 -            BIT(FLOW_DISSECTOR_KEY_PORTS))) {
 +            BIT(FLOW_DISSECTOR_KEY_PORTS) |
 +            BIT(FLOW_DISSECTOR_KEY_VLAN))) {
                dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
                return -EOPNOTSUPP;
        }
                                               sizeof(key->src));
        }
  
 +      if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
 +              struct flow_dissector_key_vlan *key =
 +                      skb_flow_dissector_target(f->dissector,
 +                                                FLOW_DISSECTOR_KEY_VLAN,
 +                                                f->key);
 +              struct flow_dissector_key_vlan *mask =
 +                      skb_flow_dissector_target(f->dissector,
 +                                                FLOW_DISSECTOR_KEY_VLAN,
 +                                                f->mask);
 +              if (mask->vlan_id != 0)
 +                      mlxsw_sp_acl_rulei_keymask_u32(rulei,
 +                                                     MLXSW_AFK_ELEMENT_VID,
 +                                                     key->vlan_id,
 +                                                     mask->vlan_id);
 +              if (mask->vlan_priority != 0)
 +                      mlxsw_sp_acl_rulei_keymask_u32(rulei,
 +                                                     MLXSW_AFK_ELEMENT_PCP,
 +                                                     key->vlan_priority,
 +                                                     mask->vlan_priority);
 +      }
 +
        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                mlxsw_sp_flower_parse_ipv4(rulei, f);
  
@@@ -340,58 -303,14 +340,58 @@@ void mlxsw_sp_flower_destroy(struct mlx
        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
                                           ingress,
                                           MLXSW_SP_ACL_PROFILE_FLOWER);
-       if (WARN_ON(IS_ERR(ruleset)))
+       if (IS_ERR(ruleset))
                return;
  
        rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
-       if (!WARN_ON(!rule)) {
+       if (rule) {
                mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
                mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
        }
  
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
  }
 +
 +int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 +                        struct tc_cls_flower_offload *f)
 +{
 +      struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 +      struct mlxsw_sp_acl_ruleset *ruleset;
 +      struct mlxsw_sp_acl_rule *rule;
 +      struct tc_action *a;
 +      LIST_HEAD(actions);
 +      u64 packets;
 +      u64 lastuse;
 +      u64 bytes;
 +      int err;
 +
 +      ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
 +                                         ingress,
 +                                         MLXSW_SP_ACL_PROFILE_FLOWER);
 +      if (WARN_ON(IS_ERR(ruleset)))
 +              return -EINVAL;
 +
 +      rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
 +      if (!rule)
 +              return -EINVAL;
 +
 +      err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &bytes, &packets,
 +                                        &lastuse);
 +      if (err)
 +              goto err_rule_get_stats;
 +
 +      preempt_disable();
 +
 +      tcf_exts_to_list(f->exts, &actions);
 +      list_for_each_entry(a, &actions, list)
 +              tcf_action_stats_update(a, bytes, packets, lastuse);
 +
 +      preempt_enable();
 +
 +      mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 +      return 0;
 +
 +err_rule_get_stats:
 +      mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 +      return err;
 +}
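
The VLAN hunks above only program a hardware key field when its mask is nonzero; a wildcarded field is simply skipped. A minimal userspace sketch of that mask-gated translation (the struct and the print stand in for the real flower key and keymask calls, and are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct vlan_match {
            uint16_t vlan_id;        /* 12-bit VID in the low bits */
            uint8_t  vlan_priority;  /* 3-bit PCP */
    };

    /* stands in for mlxsw_sp_acl_rulei_keymask_u32() */
    static void keymask_u32(const char *elem, uint32_t key, uint32_t mask)
    {
            printf("program %s: key=0x%x mask=0x%x\n",
                   elem, (unsigned)key, (unsigned)mask);
    }

    int main(void)
    {
            struct vlan_match key  = { .vlan_id = 100, .vlan_priority = 3 };
            struct vlan_match mask = { .vlan_id = 0xfff, .vlan_priority = 0 };

            if (mask.vlan_id != 0)          /* VID is matched exactly */
                    keymask_u32("VID", key.vlan_id, mask.vlan_id);
            if (mask.vlan_priority != 0)    /* PCP wildcarded: skipped */
                    keymask_u32("PCP", key.vlan_priority, mask.vlan_priority);
            return 0;
    }
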
index 89d210a54335f30d60bc8e7ccc10dc31f3fc916c,7e3a6fed3da6d94fe47139aef697563b56726950..9ff62cc5723d379b10d31358ced9f3eba9ca4c4a
@@@ -71,7 -71,8 +71,7 @@@
  #define TM_ALIGN        BIT(TM_SHIFT)
  #define TM_ELEM_SIZE    4
  
 -/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
 -#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
 +#define ILT_DEFAULT_HW_P_SIZE 4
  
  #define ILT_PAGE_IN_BYTES(hw_p_size)  (1U << ((hw_p_size) + 12))
  #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@@ -421,8 -422,9 +421,9 @@@ static void qed_cxt_set_proto_cid_count
                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+               u32 align = elems_per_page * DQ_RANGE_ALIGN;
  
-               p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+               p_conn->cid_count = roundup(p_conn->cid_count, align);
        }
  }
  
@@@ -1125,7 -1127,7 +1126,7 @@@ int qed_cxt_mngr_alloc(struct qed_hwfn 
        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 -      /* default ILT page size for all clients is 32K */
 +      /* default ILT page size for all clients is 64K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
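
The two qed_cxt.c hunks interact: raising ILT_DEFAULT_HW_P_SIZE to 4 gives 64K ILT pages (1 << (4 + 12)), and CID counts are now rounded up to elems_per_page scaled by DQ_RANGE_ALIGN rather than to a bare page. A small userspace sketch of the arithmetic, with assumed values for CONN_CXT_SIZE and DQ_RANGE_ALIGN (the real constants differ):

    #include <stdio.h>

    #define ILT_PAGE_IN_BYTES(hw_p_size)  (1U << ((hw_p_size) + 12))
    #define ROUNDUP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned page_sz  = 4;    /* ILT_DEFAULT_HW_P_SIZE -> 64K pages */
            unsigned cxt_size = 320;  /* illustrative CONN_CXT_SIZE */
            unsigned dq_align = 4;    /* illustrative DQ_RANGE_ALIGN */
            unsigned elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
            unsigned align = elems_per_page * dq_align;
            unsigned cid_count = 1000;

            printf("page=%u B, elems/page=%u, cids %u -> %u\n",
                   ILT_PAGE_IN_BYTES(page_sz), elems_per_page,
                   cid_count, ROUNDUP(cid_count, align));
            return 0;
    }
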
  
index bd4f43ffb5a18f8e5a5ae5240ff6ae79b838b591,e518f914eab13f52d8f82a8e1a29a5a80a2f2b24..8b5df71aa3c18b4ced2492be694293cccd7abdb3
@@@ -674,19 -674,11 +674,19 @@@ int qed_final_cleanup(struct qed_hwfn *
        return rc;
  }
  
 -static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 +static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
  {
        int hw_mode = 0;
  
 -      hw_mode = (1 << MODE_BB_B0);
 +      if (QED_IS_BB_B0(p_hwfn->cdev)) {
 +              hw_mode |= 1 << MODE_BB;
 +      } else if (QED_IS_AH(p_hwfn->cdev)) {
 +              hw_mode |= 1 << MODE_K2;
 +      } else {
 +              DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
 +                        p_hwfn->cdev->type);
 +              return -EINVAL;
 +      }
  
        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
 -              return;
 +              return -EINVAL;
        }
  
        switch (p_hwfn->cdev->mf_mode) {
        DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
                   "Configuring function for hw_mode: 0x%08x\n",
                   p_hwfn->hw_info.hw_mode);
 +
 +      return 0;
  }
  
  /* Init run time data for all PFs on an engine. */
@@@ -764,10 -754,10 +764,10 @@@ static int qed_hw_init_common(struct qe
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
 +      u8 vf_id, max_num_vfs;
        u16 num_pfs, pf_id;
        u32 concrete_fid;
        int rc = 0;
 -      u8 vf_id;
  
        qed_init_cau_rt_data(cdev);
  
                qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }
  
 -      for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
 +      max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
 +      for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
                concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
                qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
                qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@@ -1146,9 -1135,7 +1146,9 @@@ int qed_hw_init(struct qed_dev *cdev
                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
  
 -              qed_calc_hw_mode(p_hwfn);
 +              rc = qed_calc_hw_mode(p_hwfn);
 +              if (rc)
 +                      return rc;
  
                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
                if (rc) {
@@@ -1498,25 -1485,10 +1498,25 @@@ static void qed_hw_hwfn_free(struct qed
  static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
  {
        /* clear indirect access */
 -      qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
 -      qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
 -      qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
 -      qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
 +      if (QED_IS_AH(p_hwfn->cdev)) {
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
 +      } else {
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
 +              qed_wr(p_hwfn, p_hwfn->p_main_ptt,
 +                     PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
 +      }
  
        /* Clean Previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@@ -1638,7 -1610,6 +1638,7 @@@ static u32 qed_hw_get_dflt_resc_num(str
                                    enum qed_resources res_id)
  {
        u8 num_funcs = p_hwfn->num_funcs_on_engine;
 +      bool b_ah = QED_IS_AH(p_hwfn->cdev);
        struct qed_sb_cnt_info sb_cnt_info;
        u32 dflt_resc_num = 0;
  
                dflt_resc_num = sb_cnt_info.sb_cnt;
                break;
        case QED_L2_QUEUE:
 -              dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2
 +                                    : MAX_NUM_L2_QUEUES_BB) / num_funcs;
                break;
        case QED_VPORT:
                dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2
 +                                    : MAX_NUM_VPORTS_BB) / num_funcs;
                break;
        case QED_RSS_ENG:
 -              dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2
 +                                    : ETH_RSS_ENGINE_NUM_BB) / num_funcs;
                break;
        case QED_PQ:
                /* The granularity of the PQs is 8 */
 -              dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2
 +                                    : MAX_QM_TX_QUEUES_BB) / num_funcs;
                dflt_resc_num &= ~0x7;
                break;
        case QED_RL:
                dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
                break;
        case QED_ILT:
 -              dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2
 +                                    : PXP_NUM_ILT_RECORDS_BB) / num_funcs;
                break;
        case QED_LL2_QUEUE:
                dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
                dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
                break;
        case QED_RDMA_STATS_QUEUE:
 -              dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
 +              dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
 +                                    : RDMA_NUM_STATISTIC_COUNTERS_BB) /
 +                              num_funcs;
 +
                break;
        default:
                break;
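
The pattern repeated through qed_hw_get_dflt_resc_num() is a per-family engine maximum divided evenly across the PFs sharing the engine. A compact runnable sketch with made-up maxima (the real MAX_NUM_* constants differ):

    #include <stdio.h>

    /* illustrative per-engine maxima, not the driver's real values */
    #define MAX_L2_QUEUES_BB  256
    #define MAX_L2_QUEUES_K2  512

    static unsigned dflt_l2_queues(int b_ah, unsigned num_funcs)
    {
            return (b_ah ? MAX_L2_QUEUES_K2 : MAX_L2_QUEUES_BB) / num_funcs;
    }

    int main(void)
    {
            printf("BB, 8 PFs:  %u queues each\n", dflt_l2_queues(0, 8));
            printf("AH, 16 PFs: %u queues each\n", dflt_l2_queues(1, 16));
            return 0;
    }
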
@@@ -1818,7 -1780,6 +1818,7 @@@ out
  
  static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
  {
 +      bool b_ah = QED_IS_AH(p_hwfn->cdev);
        u8 res_id;
        int rc;
  
        }
  
        /* Sanity for ILT */
 -      if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
 +      if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
 +          (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
                DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
                          RESC_START(p_hwfn, QED_ILT),
                          RESC_END(p_hwfn, QED_ILT) - 1);
@@@ -1900,15 -1860,9 +1900,15 @@@ static int qed_hw_get_nvm_info(struct q
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
 +      case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
 +              p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
 +              break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
 +      case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
 +              p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
 +              break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
                break;
@@@ -2022,9 -1976,8 +2022,9 @@@ static void qed_get_num_funcs(struct qe
  {
        u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
        u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
 +      struct qed_dev *cdev = p_hwfn->cdev;
  
 -      num_funcs = MAX_NUM_PFS_BB;
 +      num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
  
        /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
         * in the other bits are selected.
        reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
  
        if (reg_function_hide & 0x1) {
 -              if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
 -                      num_funcs = 0;
 -                      eng_mask = 0xaaaa;
 +              if (QED_IS_BB(cdev)) {
 +                      if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
 +                              num_funcs = 0;
 +                              eng_mask = 0xaaaa;
 +                      } else {
 +                              num_funcs = 1;
 +                              eng_mask = 0x5554;
 +                      }
                } else {
                        num_funcs = 1;
 -                      eng_mask = 0x5554;
 +                      eng_mask = 0xfffe;
                }
  
                /* Get the number of the enabled functions on the engine */
                   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
  }
  
 -static int
 -qed_get_hw_info(struct qed_hwfn *p_hwfn,
 -              struct qed_ptt *p_ptt,
 -              enum qed_pci_personality personality)
 +static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
 +                                  struct qed_ptt *p_ptt)
  {
        u32 port_mode;
 -      int rc;
  
 -      /* Since all information is common, only first hwfns should do this */
 -      if (IS_LEAD_HWFN(p_hwfn)) {
 -              rc = qed_iov_hw_info(p_hwfn);
 -              if (rc)
 -                      return rc;
 -      }
 -
 -      /* Read the port mode */
 -      port_mode = qed_rd(p_hwfn, p_ptt,
 -                         CNIG_REG_NW_PORT_MODE_BB_B0);
 +      port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
  
        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }
 +}
 +
 +static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
 +                                  struct qed_ptt *p_ptt)
 +{
 +      u32 port;
 +      int i;
 +
 +      p_hwfn->cdev->num_ports_in_engines = 0;
 +
 +      for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
 +              port = qed_rd(p_hwfn, p_ptt,
 +                            CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
 +              if (port & 1)
 +                      p_hwfn->cdev->num_ports_in_engines++;
 +      }
 +
 +      if (!p_hwfn->cdev->num_ports_in_engines) {
 +              DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
 +
 +              /* Default num_ports_in_engine to something */
 +              p_hwfn->cdev->num_ports_in_engines = 1;
 +      }
 +}
 +
 +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 +{
 +      if (QED_IS_BB(p_hwfn->cdev))
 +              qed_hw_info_port_num_bb(p_hwfn, p_ptt);
 +      else
 +              qed_hw_info_port_num_ah(p_hwfn, p_ptt);
 +}
 +
 +static int
 +qed_get_hw_info(struct qed_hwfn *p_hwfn,
 +              struct qed_ptt *p_ptt,
 +              enum qed_pci_personality personality)
 +{
 +      int rc;
 +
 +      /* Since all information is common, only first hwfns should do this */
 +      if (IS_LEAD_HWFN(p_hwfn)) {
 +              rc = qed_iov_hw_info(p_hwfn);
 +              if (rc)
 +                      return rc;
 +      }
 +
 +      qed_hw_info_port_num(p_hwfn, p_ptt);
  
        qed_hw_get_nvm_info(p_hwfn, p_ptt);
  
  static int qed_get_dev_info(struct qed_dev *cdev)
  {
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 +      u16 device_id_mask;
        u32 tmp;
  
        /* Read Vendor Id / Device Id */
        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
  
 +      /* Determine type */
 +      device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
 +      switch (device_id_mask) {
 +      case QED_DEV_ID_MASK_BB:
 +              cdev->type = QED_DEV_TYPE_BB;
 +              break;
 +      case QED_DEV_ID_MASK_AH:
 +              cdev->type = QED_DEV_TYPE_AH;
 +              break;
 +      default:
 +              DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
 +              return -EBUSY;
 +      }
 +
        cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);
  
 -      cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
        tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);
  
        DP_INFO(cdev->hwfns,
 -              "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
 +              "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
 +              QED_IS_BB(cdev) ? "BB" : "AH",
 +              'A' + cdev->chip_rev,
 +              (int)cdev->chip_metal,
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);
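
The device-type switch above masks the low bits of the PCI device ID so a single case covers a whole product family. A sketch with assumed mask and family IDs (not the real QED_DEV_ID_* values):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed values for illustration only */
    #define DEV_ID_MASK     0xff00
    #define DEV_ID_MASK_BB  0x1600
    #define DEV_ID_MASK_AH  0x8000

    static const char *dev_type(uint16_t device_id)
    {
            switch (device_id & DEV_ID_MASK) {
            case DEV_ID_MASK_BB: return "BB";
            case DEV_ID_MASK_AH: return "AH";
            default:             return "unknown";
            }
    }

    int main(void)
    {
            printf("0x1634 -> %s\n", dev_type(0x1634));
            printf("0x8070 -> %s\n", dev_type(0x8070));
            return 0;
    }
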
  
@@@ -2494,9 -2389,8 +2494,8 @@@ qed_chain_alloc_sanity_check(struct qed
         * size/capacity fields are of a u32 type.
         */
        if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-            chain_size > 0x10000) ||
-           (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-            chain_size > 0x100000000ULL)) {
+            chain_size > ((u32)U16_MAX + 1)) ||
+           (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
                DP_NOTICE(cdev,
                          "The actual chain size (0x%llx) is larger than the maximal possible value\n",
                          chain_size);
@@@ -3469,8 -3363,3 +3468,8 @@@ void qed_clean_wfq_db(struct qed_hwfn *
        memset(p_hwfn->qm_info.wfq_data, 0,
               sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
  }
 +
 +int qed_device_num_engines(struct qed_dev *cdev)
 +{
 +      return QED_IS_BB(cdev) ? 2 : 1;
 +}
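
The chain sanity hunk allows exactly U16_MAX + 1 elements for a 16-bit counter, since producer/consumer indices wrap modulo the element count; only sizes strictly beyond the counter's range are rejected. The check in isolation, as a runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    enum cnt_type { CNT_U16, CNT_U32 };

    static int chain_size_ok(enum cnt_type t, uint64_t size)
    {
            if (t == CNT_U16 && size > (uint32_t)UINT16_MAX + 1)
                    return 0;
            if (t == CNT_U32 && size > UINT32_MAX)
                    return 0;
            return 1;
    }

    int main(void)
    {
            /* exactly 0x10000 elements fit a u16 counter (indices wrap) */
            printf("0x10000: %d\n", chain_size_ok(CNT_U16, 0x10000));
            printf("0x10001: %d\n", chain_size_ok(CNT_U16, 0x10001));
            return 0;
    }
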
index 161d90376dae8a44b3614f2edbcaf61c60cb6894,0d3cef409c96d0849c7860e8f03a920b3b8966f1..178650aa0c6c24dc160e6c3021963852b4d029c5
@@@ -211,6 -211,8 +211,8 @@@ static void qed_ll2b_complete_rx_packet
        /* If need to reuse or there's no replacement buffer, repost this */
        if (rc)
                goto out_post;
+       dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+                        cdev->ll2->rx_size, DMA_FROM_DEVICE);
  
        skb = build_skb(buffer->data, 0);
        if (!skb) {
@@@ -474,7 -476,7 +476,7 @@@ qed_ll2_rxq_completion_gsi(struct qed_h
  static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
                                      struct qed_ll2_info *p_ll2_conn,
                                      union core_rx_cqe_union *p_cqe,
-                                     unsigned long lock_flags,
+                                     unsigned long *p_lock_flags,
                                      bool b_last_cqe)
  {
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
                          "Mismatch between active_descq and the LL2 Rx chain\n");
        list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
  
-       spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+       spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
        qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
                                    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-       spin_lock_irqsave(&p_rx->lock, lock_flags);
+       spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
  
        return 0;
  }
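
The lock_flags change above fixes a by-value bug: a callee that drops and re-takes an irqsave lock must write the refreshed flags back through a pointer, otherwise the caller keeps a stale copy for its final restore. A userspace illustration of the visibility difference (fake_save() stands in for the irq-state capture done by spin_lock_irqsave()):

    #include <stdio.h>

    static unsigned long fake_save(void)
    {
            static unsigned long state;
            return ++state;         /* each call yields a fresh "state" */
    }

    static void callee_by_value(unsigned long flags)
    {
            flags = fake_save();    /* refreshed copy dies on return */
            (void)flags;
    }

    static void callee_by_pointer(unsigned long *flags)
    {
            *flags = fake_save();   /* caller sees the refreshed state */
    }

    int main(void)
    {
            unsigned long flags = fake_save();      /* initial save */

            callee_by_value(flags);
            printf("by value:   caller restores %lu (stale)\n", flags);
            callee_by_pointer(&flags);
            printf("by pointer: caller restores %lu\n", flags);
            return 0;
    }
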
@@@ -538,7 -540,8 +540,8 @@@ static int qed_ll2_rxq_completion(struc
                        break;
                case CORE_RX_CQE_TYPE_REGULAR:
                        rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-                                                       cqe, flags, b_last_cqe);
+                                                       cqe, &flags,
+                                                       b_last_cqe);
                        break;
                default:
                        rc = -EIO;
@@@ -594,7 -597,7 +597,7 @@@ static u8 qed_ll2_convert_rx_parse_to_t
        u8 bd_flags = 0;
  
        if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
 -              SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
 +              SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
  
        return bd_flags;
  }
@@@ -755,8 -758,8 +758,8 @@@ qed_ooo_submit_tx_buffers(struct qed_hw
                             p_buffer->placement_offset;
                parse_flags = p_buffer->parse_flags;
                bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
 -              SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
 -              SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
 +              SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
 +              SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
  
                rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
                                               p_buffer->vlan, bd_flags,
@@@ -968,7 -971,7 +971,7 @@@ static int qed_ll2_start_ooo(struct qed
  {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-       struct qed_ll2_conn ll2_info;
+       struct qed_ll2_conn ll2_info = { 0 };
        int rc;
  
        ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
@@@ -1588,34 -1591,33 +1591,34 @@@ static void qed_ll2_prepare_tx_packet_s
        p_tx->cur_send_frag_num++;
  }
  
 -static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 -                                           struct qed_ll2_info *p_ll2,
 -                                           struct qed_ll2_tx_packet *p_curp,
 -                                           u8 num_of_bds,
 -                                           enum core_tx_dest tx_dest,
 -                                           u16 vlan,
 -                                           u8 bd_flags,
 -                                           u16 l4_hdr_offset_w,
 -                                           enum core_roce_flavor_type type,
 -                                           dma_addr_t first_frag,
 -                                           u16 first_frag_len)
 +static void
 +qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 +                               struct qed_ll2_info *p_ll2,
 +                               struct qed_ll2_tx_packet *p_curp,
 +                               u8 num_of_bds,
 +                               enum core_tx_dest tx_dest,
 +                               u16 vlan,
 +                               u8 bd_flags,
 +                               u16 l4_hdr_offset_w,
 +                               enum core_roce_flavor_type roce_flavor,
 +                               dma_addr_t first_frag,
 +                               u16 first_frag_len)
  {
        struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
        u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
        struct core_tx_bd *start_bd = NULL;
 -      u16 frag_idx;
 +      u16 bd_data = 0, frag_idx;
  
        start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
        start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
                  cpu_to_le16(l4_hdr_offset_w));
        SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
 -      start_bd->bd_flags.as_bitfield = bd_flags;
 -      start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
 -          CORE_TX_BD_FLAGS_START_BD_SHIFT;
 -      SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
 -      SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
 +      bd_data |= bd_flags;
 +      SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
 +      SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
 +      SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
 +      start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
        DMA_REGPAIR_LE(start_bd->addr, first_frag);
        start_bd->nbytes = cpu_to_le16(first_frag_len);
  
                struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
  
                *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
 -              (*p_bd)->bd_flags.as_bitfield = 0;
 +              (*p_bd)->bd_data.as_bitfield = 0;
                (*p_bd)->bitfield1 = 0;
 -              (*p_bd)->bitfield0 = 0;
                p_curp->bds_set[frag_idx].tx_frag = 0;
                p_curp->bds_set[frag_idx].frag_len = 0;
        }
@@@ -2238,11 -2241,11 +2241,11 @@@ static int qed_ll2_start_xmit(struct qe
        /* Request HW to calculate IP csum */
        if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
              ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
 -              flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
 +              flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
  
        if (skb_vlan_tag_present(skb)) {
                vlan = skb_vlan_tag_get(skb);
 -              flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
 +              flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
        }
  
        rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
index 7433b164e51356d86f27a2605911eb90f1d7c18c,f9f3dba7a58800d9288199b50bec0b85d18cb249..b09c4fca18050d9c59edecc9a5b1e534303c5475
@@@ -196,7 -196,6 +196,7 @@@ int netvsc_recv_callback(struct net_dev
                         const struct ndis_tcp_ip_checksum_info *csum_info,
                         const struct ndis_pkt_8021q_info *vlan);
  void netvsc_channel_cb(void *context);
 +int netvsc_poll(struct napi_struct *napi, int budget);
  int rndis_filter_open(struct netvsc_device *nvdev);
  int rndis_filter_close(struct netvsc_device *nvdev);
  int rndis_filter_device_add(struct hv_device *dev,
@@@ -701,6 -700,8 +701,8 @@@ struct net_device_context 
  
        u32 tx_checksum_mask;
  
+       u32 tx_send_table[VRSS_SEND_TAB_SIZE];
        /* Ethtool settings */
        u8 duplex;
        u32 speed;
  /* Per channel data */
  struct netvsc_channel {
        struct vmbus_channel *channel;
 +      struct napi_struct napi;
        struct multi_send_data msd;
        struct multi_recv_comp mrc;
        atomic_t queue_sends;
@@@ -759,7 -759,6 +761,6 @@@ struct netvsc_device 
  
        struct nvsp_message revoke_packet;
  
-       u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
        u32 num_chn;
        spinlock_t sc_lock; /* Protects num_sc_offered variable */
index 8f9aeec2ce0f4f491e838b6f9ac4f194752523ca,4c1d8cca247b921e263268bf8344898c31bb488a..0e71164849dd47fd8800792031851f55aecfe0c1
@@@ -91,6 -91,15 +91,6 @@@ static void free_netvsc_device(struct n
  }
  
  
 -static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
 -                                     u16 q_idx)
 -{
 -      const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 -
 -      return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
 -              atomic_read(&nvchan->queue_sends) == 0;
 -}
 -
  static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
  {
        struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
@@@ -547,7 -556,6 +547,7 @@@ void netvsc_device_remove(struct hv_dev
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *net_device = net_device_ctx->nvdev;
 +      int i;
  
        netvsc_disconnect_vsp(device);
  
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
  
 +      for (i = 0; i < net_device->num_chn; i++)
 +              napi_disable(&net_device->chan_table[i].napi);
 +
        /* Release all resources */
        free_netvsc_device(net_device);
  }
@@@ -595,9 -600,9 +595,9 @@@ static inline void netvsc_free_send_slo
  static void netvsc_send_tx_complete(struct netvsc_device *net_device,
                                    struct vmbus_channel *incoming_channel,
                                    struct hv_device *device,
 -                                  struct vmpacket_descriptor *packet)
 +                                  const struct vmpacket_descriptor *desc)
  {
 -      struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
 +      struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = device->channel;
  static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct vmbus_channel *incoming_channel,
                                   struct hv_device *device,
 -                                 struct vmpacket_descriptor *packet)
 +                                 const struct vmpacket_descriptor *desc)
  {
 -      struct nvsp_message *nvsp_packet;
 +      struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
        struct net_device *ndev = hv_get_drvdata(device);
  
 -      nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
 -                                            (packet->offset8 << 3));
 -
        switch (nvsp_packet->hdr.msg_type) {
        case NVSP_MSG_TYPE_INIT_COMPLETE:
        case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
  
        case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
                netvsc_send_tx_complete(net_device, incoming_channel,
 -                                      device, packet);
 +                                      device, desc);
                break;
  
        default:
@@@ -1058,29 -1066,28 +1058,29 @@@ static inline struct recv_comp_data *ge
        return rcd;
  }
  
 -static void netvsc_receive(struct net_device *ndev,
 +static int netvsc_receive(struct net_device *ndev,
                   struct netvsc_device *net_device,
                   struct net_device_context *net_device_ctx,
                   struct hv_device *device,
                   struct vmbus_channel *channel,
 -                 struct vmtransfer_page_packet_header *vmxferpage_packet,
 +                 const struct vmpacket_descriptor *desc,
                   struct nvsp_message *nvsp)
  {
 +      const struct vmtransfer_page_packet_header *vmxferpage_packet
 +              = container_of(desc, const struct vmtransfer_page_packet_header, d);
 +      u16 q_idx = channel->offermsg.offer.sub_channel_index;
        char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
        int ret;
 -      struct recv_comp_data *rcd;
 -      u16 q_idx = channel->offermsg.offer.sub_channel_index;
  
        /* Make sure this is a valid nvsp packet */
        if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Unknown nvsp packet type received %u\n",
                          nvsp->hdr.msg_type);
 -              return;
 +              return 0;
        }
  
        if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
                          "Invalid xfer page set id - expecting %x got %x\n",
                          NETVSC_RECEIVE_BUFFER_ID,
                          vmxferpage_packet->xfer_pageset_id);
 -              return;
 +              return 0;
        }
  
        count = vmxferpage_packet->range_cnt;
                                              channel, data, buflen);
        }
  
 -      if (!net_device->chan_table[q_idx].mrc.buf) {
 +      if (net_device->chan_table[q_idx].mrc.buf) {
 +              struct recv_comp_data *rcd;
 +
 +              rcd = get_recv_comp_slot(net_device, channel, q_idx);
 +              if (rcd) {
 +                      rcd->tid = vmxferpage_packet->d.trans_id;
 +                      rcd->status = status;
 +              } else {
 +                      netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
 +                                 q_idx, vmxferpage_packet->d.trans_id);
 +              }
 +      } else {
                ret = netvsc_send_recv_completion(channel,
                                                  vmxferpage_packet->d.trans_id,
                                                  status);
                if (ret)
                        netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
                                   q_idx, vmxferpage_packet->d.trans_id, ret);
 -              return;
 -      }
 -
 -      rcd = get_recv_comp_slot(net_device, channel, q_idx);
 -
 -      if (!rcd) {
 -              netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
 -                         q_idx, vmxferpage_packet->d.trans_id);
 -              return;
        }
 -
 -      rcd->tid = vmxferpage_packet->d.trans_id;
 -      rcd->status = status;
 +      return count;
  }
  
  static void netvsc_send_table(struct hv_device *hdev,
                              struct nvsp_message *nvmsg)
  {
-       struct netvsc_device *nvscdev;
        struct net_device *ndev = hv_get_drvdata(hdev);
+       struct net_device_context *net_device_ctx = netdev_priv(ndev);
        int i;
        u32 count, *tab;
  
-       nvscdev = get_outbound_net_device(hdev);
-       if (!nvscdev)
-               return;
        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
                      nvmsg->msg.v5_msg.send_table.offset);
  
        for (i = 0; i < count; i++)
-               nvscdev->send_table[i] = tab[i];
+               net_device_ctx->tx_send_table[i] = tab[i];
  }
  
  static void netvsc_send_vf(struct net_device_context *net_device_ctx,
@@@ -1173,15 -1176,17 +1169,15 @@@ static inline void netvsc_receive_inban
        }
  }
  
 -static void netvsc_process_raw_pkt(struct hv_device *device,
 -                                 struct vmbus_channel *channel,
 -                                 struct netvsc_device *net_device,
 -                                 struct net_device *ndev,
 -                                 u64 request_id,
 -                                 struct vmpacket_descriptor *desc)
 +static int netvsc_process_raw_pkt(struct hv_device *device,
 +                                struct vmbus_channel *channel,
 +                                struct netvsc_device *net_device,
 +                                struct net_device *ndev,
 +                                u64 request_id,
 +                                const struct vmpacket_descriptor *desc)
  {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
 -      struct nvsp_message *nvmsg
 -              = (struct nvsp_message *)((unsigned long)desc
 -                                        + (desc->offset8 << 3));
 +      struct nvsp_message *nvmsg = hv_pkt_data(desc);
  
        switch (desc->type) {
        case VM_PKT_COMP:
                break;
  
        case VM_PKT_DATA_USING_XFER_PAGES:
 -              netvsc_receive(ndev, net_device, net_device_ctx,
 -                             device, channel,
 -                             (struct vmtransfer_page_packet_header *)desc,
 -                             nvmsg);
 +              return netvsc_receive(ndev, net_device, net_device_ctx,
 +                                    device, channel, desc, nvmsg);
                break;
  
        case VM_PKT_DATA_INBAND:
                           desc->type, request_id);
                break;
        }
 +
 +      return 0;
 +}
 +
 +static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
 +{
 +      struct vmbus_channel *primary = channel->primary_channel;
 +
 +      return primary ? primary->device_obj : channel->device_obj;
 +}
 +
 +int netvsc_poll(struct napi_struct *napi, int budget)
 +{
 +      struct netvsc_channel *nvchan
 +              = container_of(napi, struct netvsc_channel, napi);
 +      struct vmbus_channel *channel = nvchan->channel;
 +      struct hv_device *device = netvsc_channel_to_device(channel);
 +      u16 q_idx = channel->offermsg.offer.sub_channel_index;
 +      struct net_device *ndev = hv_get_drvdata(device);
 +      struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
 +      const struct vmpacket_descriptor *desc;
 +      int work_done = 0;
 +
 +      desc = hv_pkt_iter_first(channel);
 +      while (desc) {
 +              int count;
 +
 +              count = netvsc_process_raw_pkt(device, channel, net_device,
 +                                             ndev, desc->trans_id, desc);
 +              work_done += count;
 +              desc = __hv_pkt_iter_next(channel, desc);
 +
 +              /* If receive packet budget is exhausted, reschedule */
 +              if (work_done >= budget) {
 +                      work_done = budget;
 +                      break;
 +              }
 +      }
 +      hv_pkt_iter_close(channel);
 +
 +      /* If budget wasn't exhausted, complete NAPI; reschedule if more data arrived */
 +      if (work_done < budget &&
 +          napi_complete_done(napi, work_done) &&
 +          hv_end_read(&channel->inbound) != 0)
 +              napi_reschedule(napi);
 +
 +      netvsc_chk_recv_comp(net_device, channel, q_idx);
 +      return work_done;
  }
  
  void netvsc_channel_cb(void *context)
  {
        struct vmbus_channel *channel = context;
 +      struct hv_device *device = netvsc_channel_to_device(channel);
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
 -      struct hv_device *device;
        struct netvsc_device *net_device;
 -      struct vmpacket_descriptor *desc;
        struct net_device *ndev;
 -      bool need_to_commit = false;
 -
 -      if (channel->primary_channel != NULL)
 -              device = channel->primary_channel->device_obj;
 -      else
 -              device = channel->device_obj;
  
        ndev = hv_get_drvdata(device);
        if (unlikely(!ndev))
                return;
  
 -      net_device = net_device_to_netvsc_device(ndev);
 -      if (unlikely(net_device->destroy) &&
 -          netvsc_channel_idle(net_device, q_idx))
 -              return;
 -
 -      /* commit_rd_index() -> hv_signal_on_read() needs this. */
 -      init_cached_read_index(channel);
 -
 -      while ((desc = get_next_pkt_raw(channel)) != NULL) {
 -              netvsc_process_raw_pkt(device, channel, net_device,
 -                                     ndev, desc->trans_id, desc);
 -
 -              put_pkt_raw(channel, desc);
 -              need_to_commit = true;
 -      }
 -
 -      if (need_to_commit)
 -              commit_rd_index(channel);
 +      /* disable interrupts from host */
 +      hv_begin_read(&channel->inbound);
  
 -      netvsc_chk_recv_comp(net_device, channel, q_idx);
 +      net_device = net_device_to_netvsc_device(ndev);
 +      napi_schedule(&net_device->chan_table[q_idx].napi);
  }
  
  /*
@@@ -1290,11 -1271,6 +1286,11 @@@ int netvsc_device_add(struct hv_device 
  
        net_device->ring_size = ring_size;
  
 +      /* Because the device uses NAPI, all interrupt batching and
 +       * control is done via the NET softirq, not the channel callback
 +       */
 +      set_channel_read_mode(device->channel, HV_CALL_ISR);
 +
        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
         * chn_table with the default channel to use it before subchannels are
         * opened.
         */
 -      for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 -              net_device->chan_table[i].channel = device->channel;
 +      for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
 +              struct netvsc_channel *nvchan = &net_device->chan_table[i];
 +
 +              nvchan->channel = device->channel;
 +              netif_napi_add(ndev, &nvchan->napi,
 +                             netvsc_poll, NAPI_POLL_WEIGHT);
 +      }
 +
 +      /* Enable NAPI handler for init callbacks */
 +      napi_enable(&net_device->chan_table[0].napi);
  
        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
        return ret;
  
  close:
 +      napi_disable(&net_device->chan_table[0].napi);
 +
        /* Now, we can close the channel safely */
        vmbus_close(device->channel);
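
netvsc_poll() above follows the standard NAPI contract: accumulate work per packet, clamp at the budget so the scheduler repolls, and only complete NAPI when the ring drained early. The budget accounting in isolation, as a runnable sketch:

    #include <stdio.h>

    static int poll_ring(const int *pkts_per_desc, int ndesc, int budget)
    {
            int work_done = 0;

            for (int i = 0; i < ndesc; i++) {
                    work_done += pkts_per_desc[i];
                    if (work_done >= budget) {
                            /* budget exhausted: clamp, NAPI will repoll */
                            work_done = budget;
                            break;
                    }
            }
            /* returning < budget means the ring drained; NAPI may complete */
            return work_done;
    }

    int main(void)
    {
            int pkts[] = { 10, 20, 40 };

            printf("budget 64: work_done=%d\n", poll_ring(pkts, 3, 64));
            printf("budget 16: work_done=%d\n", poll_ring(pkts, 3, 16));
            return 0;
    }
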
  
index b12808ab343230194e984d2cc41d7d8d8cef532f,5ede87f30463e8211ef2828a8f74d4951c4166a6..191372486a877d685ea96faec4eff49383db19d2
@@@ -206,17 -206,15 +206,15 @@@ static u16 netvsc_select_queue(struct n
                        void *accel_priv, select_queue_fallback_t fallback)
  {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-       struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+       unsigned int num_tx_queues = ndev->real_num_tx_queues;
        struct sock *sk = skb->sk;
        int q_idx = sk_tx_queue_get(sk);
  
-       if (q_idx < 0 || skb->ooo_okay ||
-           q_idx >= ndev->real_num_tx_queues) {
+       if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
                u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
                int new_idx;
  
-               new_idx = nvsc_dev->send_table[hash]
-                       % nvsc_dev->num_chn;
+               new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
  
                if (q_idx != new_idx && sk &&
                    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                q_idx = new_idx;
        }
  
-       if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
-               q_idx = 0;
        return q_idx;
  }
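
Queue selection now folds the host-provided send table onto the real TX queue count instead of indirecting through the netvsc device. A sketch of the lookup, with an illustrative hash value and table contents:

    #include <stdint.h>
    #include <stdio.h>

    #define VRSS_SEND_TAB_SIZE 16   /* illustrative table size */

    static unsigned select_queue(uint32_t flow_hash,
                                 const uint32_t *tx_send_table,
                                 unsigned num_tx_queues)
    {
            uint32_t slot = flow_hash % VRSS_SEND_TAB_SIZE;

            return tx_send_table[slot] % num_tx_queues;
    }

    int main(void)
    {
            uint32_t table[VRSS_SEND_TAB_SIZE] = {
                    0, 3, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
            };

            printf("queue=%u\n", select_queue(0xdeadbeef, table, 4));
            return 0;
    }
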
  
@@@ -589,14 -584,13 +584,14 @@@ void netvsc_linkstatus_callback(struct 
  }
  
  static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 +                                           struct napi_struct *napi,
                                             const struct ndis_tcp_ip_checksum_info *csum_info,
                                             const struct ndis_pkt_8021q_info *vlan,
                                             void *data, u32 buflen)
  {
        struct sk_buff *skb;
  
 -      skb = netdev_alloc_skb_ip_align(net, buflen);
 +      skb = napi_alloc_skb(napi, buflen);
        if (!skb)
                return skb;
  
@@@ -643,11 -637,11 +638,11 @@@ int netvsc_recv_callback(struct net_dev
  {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device = net_device_ctx->nvdev;
 +      u16 q_idx = channel->offermsg.offer.sub_channel_index;
 +      struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
        struct net_device *vf_netdev;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;
 -      u16 q_idx = channel->offermsg.offer.sub_channel_index;
 -
  
        if (net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;
                net = vf_netdev;
  
        /* Allocate a skb - TODO direct I/O to pages? */
 -      skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
 +      skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
 +                                  csum_info, vlan, data, len);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                rcu_read_unlock();
         * on the synthetic device because modifying the VF device
         * statistics will not work correctly.
         */
 -      rx_stats = &net_device->chan_table[q_idx].rx_stats;
 +      rx_stats = &nvchan->rx_stats;
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
        rx_stats->bytes += len;
                ++rx_stats->multicast;
        u64_stats_update_end(&rx_stats->syncp);
  
 -      /*
 -       * Pass the skb back up. Network stack will deallocate the skb when it
 -       * is done.
 -       * TODO - use NAPI?
 -       */
 -      netif_receive_skb(skb);
 +      napi_gro_receive(&nvchan->napi, skb);
        rcu_read_unlock();
  
        return 0;
@@@ -789,19 -787,18 +784,19 @@@ static int netvsc_set_channels(struct n
        return ret;
  }
  
 -static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
 +static bool
 +netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
  {
 -      struct ethtool_cmd diff1 = *cmd;
 -      struct ethtool_cmd diff2 = {};
 +      struct ethtool_link_ksettings diff1 = *cmd;
 +      struct ethtool_link_ksettings diff2 = {};
  
 -      ethtool_cmd_speed_set(&diff1, 0);
 -      diff1.duplex = 0;
 +      diff1.base.speed = 0;
 +      diff1.base.duplex = 0;
        /* advertising and cmd are usually set */
 -      diff1.advertising = 0;
 -      diff1.cmd = 0;
 +      ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
 +      diff1.base.cmd = 0;
        /* We set port to PORT_OTHER */
 -      diff2.port = PORT_OTHER;
 +      diff2.base.port = PORT_OTHER;
  
        return !memcmp(&diff1, &diff2, sizeof(diff1));
  }
@@@ -814,32 -811,30 +809,32 @@@ static void netvsc_init_settings(struc
        ndc->duplex = DUPLEX_UNKNOWN;
  }
  
 -static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +static int netvsc_get_link_ksettings(struct net_device *dev,
 +                                   struct ethtool_link_ksettings *cmd)
  {
        struct net_device_context *ndc = netdev_priv(dev);
  
 -      ethtool_cmd_speed_set(cmd, ndc->speed);
 -      cmd->duplex = ndc->duplex;
 -      cmd->port = PORT_OTHER;
 +      cmd->base.speed = ndc->speed;
 +      cmd->base.duplex = ndc->duplex;
 +      cmd->base.port = PORT_OTHER;
  
        return 0;
  }
  
 -static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +static int netvsc_set_link_ksettings(struct net_device *dev,
 +                                   const struct ethtool_link_ksettings *cmd)
  {
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;
  
 -      speed = ethtool_cmd_speed(cmd);
 +      speed = cmd->base.speed;
        if (!ethtool_validate_speed(speed) ||
 -          !ethtool_validate_duplex(cmd->duplex) ||
 +          !ethtool_validate_duplex(cmd->base.duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;
  
        ndc->speed = speed;
 -      ndc->duplex = cmd->duplex;
 +      ndc->duplex = cmd->base.duplex;
  
        return 0;
  }
@@@ -1173,13 -1168,13 +1168,13 @@@ static const struct ethtool_ops ethtool
        .get_channels   = netvsc_get_channels,
        .set_channels   = netvsc_set_channels,
        .get_ts_info    = ethtool_op_get_ts_info,
 -      .get_settings   = netvsc_get_settings,
 -      .set_settings   = netvsc_set_settings,
        .get_rxnfc      = netvsc_get_rxnfc,
        .get_rxfh_key_size = netvsc_get_rxfh_key_size,
        .get_rxfh_indir_size = netvsc_rss_indir_size,
        .get_rxfh       = netvsc_get_rxfh,
        .set_rxfh       = netvsc_set_rxfh,
 +      .get_link_ksettings = netvsc_get_link_ksettings,
 +      .set_link_ksettings = netvsc_set_link_ksettings,
  };
  
  static const struct net_device_ops device_ops = {
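
netvsc_validate_ethtool_ss_cmd() above uses a copy-zero-compare trick: copy the user request, zero the fields the driver is willing to honor, then memcmp against a template holding only the driver's fixed values; any other nonzero field rejects the request. A self-contained sketch with an assumed four-field stand-in for struct ethtool_link_ksettings:

    #include <stdio.h>
    #include <string.h>

    struct settings {       /* assumed stand-in, no padding */
            int speed;
            int duplex;
            int port;
            int autoneg;
    };

    #define PORT_OTHER 0xff

    static int validate(const struct settings *req)
    {
            struct settings diff1 = *req;
            struct settings diff2 = { 0 };

            diff1.speed  = 0;          /* userspace may set these */
            diff1.duplex = 0;
            diff2.port   = PORT_OTHER; /* driver's fixed value */

            /* anything else nonzero makes the request invalid */
            return !memcmp(&diff1, &diff2, sizeof(diff1));
    }

    int main(void)
    {
            struct settings ok  = { .speed = 1000, .duplex = 1,
                                    .port = PORT_OTHER };
            struct settings bad = ok;

            bad.autoneg = 1;
            printf("ok=%d bad=%d\n", validate(&ok), validate(&bad));
            return 0;
    }
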
diff --combined drivers/net/tun.c
index c418f0a9d2d18969e5eb396e79a422c167986168,34cc3c590aa5c5c49509159d8fbf0f0cfcfca988..16dfb4cb1980121afe2264d5d332c88f9f91d537
@@@ -822,7 -822,18 +822,18 @@@ static void tun_net_uninit(struct net_d
  /* Net device open. */
  static int tun_net_open(struct net_device *dev)
  {
+       struct tun_struct *tun = netdev_priv(dev);
+       int i;
        netif_tx_start_all_queues(dev);
+       for (i = 0; i < tun->numqueues; i++) {
+               struct tun_file *tfile;
+               tfile = rtnl_dereference(tun->tfiles[i]);
+               tfile->socket.sk->sk_write_space(tfile->socket.sk);
+       }
        return 0;
  }
  
@@@ -1103,9 -1114,10 +1114,10 @@@ static unsigned int tun_chr_poll(struc
        if (!skb_array_empty(&tfile->tx_array))
                mask |= POLLIN | POLLRDNORM;
  
-       if (sock_writeable(sk) ||
-           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-            sock_writeable(sk)))
+       if (tun->dev->flags & IFF_UP &&
+           (sock_writeable(sk) ||
+            (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+             sock_writeable(sk))))
                mask |= POLLOUT | POLLWRNORM;
  
        if (tun->dev->reg_state != NETREG_REGISTERED)
@@@ -2430,16 -2442,18 +2442,16 @@@ static struct miscdevice tun_miscdev = 
  
  /* ethtool interface */
  
 -static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 -{
 -      cmd->supported          = 0;
 -      cmd->advertising        = 0;
 -      ethtool_cmd_speed_set(cmd, SPEED_10);
 -      cmd->duplex             = DUPLEX_FULL;
 -      cmd->port               = PORT_TP;
 -      cmd->phy_address        = 0;
 -      cmd->transceiver        = XCVR_INTERNAL;
 -      cmd->autoneg            = AUTONEG_DISABLE;
 -      cmd->maxtxpkt           = 0;
 -      cmd->maxrxpkt           = 0;
 +static int tun_get_link_ksettings(struct net_device *dev,
 +                                struct ethtool_link_ksettings *cmd)
 +{
 +      ethtool_link_ksettings_zero_link_mode(cmd, supported);
 +      ethtool_link_ksettings_zero_link_mode(cmd, advertising);
 +      cmd->base.speed         = SPEED_10;
 +      cmd->base.duplex        = DUPLEX_FULL;
 +      cmd->base.port          = PORT_TP;
 +      cmd->base.phy_address   = 0;
 +      cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
  }
  
@@@ -2502,6 -2516,7 +2514,6 @@@ static int tun_set_coalesce(struct net_
  }
  
  static const struct ethtool_ops tun_ethtool_ops = {
 -      .get_settings   = tun_get_settings,
        .get_drvinfo    = tun_get_drvinfo,
        .get_msglevel   = tun_get_msglevel,
        .set_msglevel   = tun_set_msglevel,
        .get_ts_info    = ethtool_op_get_ts_info,
        .get_coalesce   = tun_get_coalesce,
        .set_coalesce   = tun_set_coalesce,
 +      .get_link_ksettings = tun_get_link_ksettings,
  };
  
  static int tun_queue_resize(struct tun_struct *tun)
@@@ -2568,7 -2582,6 +2580,6 @@@ static int __init tun_init(void
        int ret = 0;
  
        pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
-       pr_info("%s\n", DRV_COPYRIGHT);
  
        ret = rtnl_link_register(&tun_link_ops);
        if (ret) {
diff --combined drivers/net/vxlan.c
index 168257aa8acea8fa48abb24d64b0625cec088afb,bdb6ae16d4a85bf9539199e189011bce104ba51a..09855be219e9535ba373f5a6deaf70aff86c6b80
@@@ -276,9 -276,9 +276,9 @@@ static int vxlan_fdb_info(struct sk_buf
        send_eth = send_ip = true;
  
        if (type == RTM_GETNEIGH) {
 -              ndm->ndm_family = AF_INET;
                send_ip = !vxlan_addr_any(&rdst->remote_ip);
                send_eth = !is_zero_ether_addr(fdb->eth_addr);
 +              ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
        } else
                ndm->ndm_family = AF_BRIDGE;
        ndm->ndm_state = fdb->state;
@@@ -2976,6 -2976,44 +2976,44 @@@ static int vxlan_dev_configure(struct n
        return 0;
  }
  
+ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+                             struct vxlan_config *conf)
+ {
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+       err = vxlan_dev_configure(net, dev, conf, false);
+       if (err)
+               return err;
+       dev->ethtool_ops = &vxlan_ethtool_ops;
+       /* create an fdb entry for a valid default destination */
+       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+               err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                                      &vxlan->default_dst.remote_ip,
+                                      NUD_REACHABLE | NUD_PERMANENT,
+                                      NLM_F_EXCL | NLM_F_CREATE,
+                                      vxlan->cfg.dst_port,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_ifindex,
+                                      NTF_SELF);
+               if (err)
+                       return err;
+       }
+       err = register_netdevice(dev);
+       if (err) {
+               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+               return err;
+       }
+       list_add(&vxlan->next, &vn->vxlan_list);
+       return 0;
+ }
  static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
                         struct net_device *dev, struct vxlan_config *conf,
                         bool changelink)
  static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
  {
-       struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
-       struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_config conf;
        int err;
  
        if (err)
                return err;
  
-       err = vxlan_dev_configure(src_net, dev, &conf, false);
-       if (err)
-               return err;
-       dev->ethtool_ops = &vxlan_ethtool_ops;
-       /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-               err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
-                                      NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
-                                      vxlan->cfg.dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
-               if (err)
-                       return err;
-       }
-       err = register_netdevice(dev);
-       if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
-       }
-       list_add(&vxlan->next, &vn->vxlan_list);
-       return 0;
+       return __vxlan_dev_create(src_net, dev, &conf);
  }
  
  static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@@ -3440,7 -3447,7 +3447,7 @@@ struct net_device *vxlan_dev_create(str
        if (IS_ERR(dev))
                return dev;
  
-       err = vxlan_dev_configure(net, dev, conf, false);
+       err = __vxlan_dev_create(net, dev, conf);
        if (err < 0) {
                free_netdev(dev);
                return ERR_PTR(err);
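
Factoring __vxlan_dev_create() keeps the create/rollback ordering in one place: the default-destination FDB entry is created before register_netdevice(), so a registration failure must delete it again before the error propagates. The ordering in miniature, as a runnable sketch with placeholder functions:

    #include <stdio.h>

    static int fdb_create(void)        { puts("fdb created"); return 0; }
    static void fdb_delete(void)       { puts("fdb deleted"); }
    static int register_dev(int fail)  { return fail ? -1 : 0; }

    static int dev_create(int fail_register)
    {
            int err = fdb_create();

            if (err)
                    return err;
            err = register_dev(fail_register);
            if (err) {
                    fdb_delete();   /* roll back the default FDB entry */
                    return err;
            }
            puts("device registered");
            return 0;
    }

    int main(void)
    {
            dev_create(0);
            dev_create(1);
            return 0;
    }
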
index af2294635ab22182efb78b403139672130e7a12f,46debe5034af102710a574a80254433b94265271..1d7f90d0adc1a0c4f55ade57f0bf18f622372184
@@@ -96,7 -96,7 +96,7 @@@ void qedf_cmd_mgr_free(struct qedf_cmd_
        if (!cmgr->io_bdt_pool)
                goto free_cmd_pool;
  
 -      bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
 +      bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                if (bdt_info->bd_tbl) {
@@@ -119,8 -119,6 +119,8 @@@ free_cmd_pool
  
        for (i = 0; i < num_ios; i++) {
                io_req = &cmgr->cmds[i];
 +              kfree(io_req->sgl_task_params);
 +              kfree(io_req->task_params);
                /* Make sure we free per command sense buffer */
                if (io_req->sense_buffer)
                        dma_free_coherent(&qedf->pdev->dev,
@@@ -180,7 -178,7 +180,7 @@@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc
        spin_lock_init(&cmgr->lock);
  
        /*
 -       * Initialize list of qedf_ioreq.
 +       * Initialize I/O request fields.
         */
        xid = QEDF_MIN_XID;
  
                    GFP_KERNEL);
                if (!io_req->sense_buffer)
                        goto mem_err;
 +
 +              /* Allocate task parameters to pass to f/w init functions */
 +              io_req->task_params = kzalloc(sizeof(*io_req->task_params),
 +                                            GFP_KERNEL);
 +              if (!io_req->task_params) {
 +                      QEDF_ERR(&(qedf->dbg_ctx),
 +                               "Failed to allocate task_params for xid=0x%x\n",
 +                               i);
 +                      goto mem_err;
 +              }
 +
 +              /*
 +               * Allocate scatter/gather list info to pass to f/w init
 +               * functions.
 +               */
 +              io_req->sgl_task_params = kzalloc(
 +                  sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
 +              if (!io_req->sgl_task_params) {
 +                      QEDF_ERR(&(qedf->dbg_ctx),
 +                               "Failed to allocate sgl_task_params for xid=0x%x\n",
 +                               i);
 +                      goto mem_err;
 +              }
        }
  
        /* Allocate pool of io_bdts - one for each qedf_ioreq */
                cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
                    GFP_KERNEL);
                if (!cmgr->io_bdt_pool[i]) {
 -                      QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
 -                                 "io_bdt_pool[%d].\n", i);
 +                      QEDF_WARN(&(qedf->dbg_ctx),
 +                                "Failed to alloc io_bdt_pool[%d].\n", i);
                        goto mem_err;
                }
        }
        for (i = 0; i < num_ios; i++) {
                bdt_info = cmgr->io_bdt_pool[i];
                bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
 -                  QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
 +                  QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
                    &bdt_info->bd_tbl_dma, GFP_KERNEL);
                if (!bdt_info->bd_tbl) {
 -                      QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
 -                                 "bdt_tbl[%d].\n", i);
 +                      QEDF_WARN(&(qedf->dbg_ctx),
 +                                "Failed to alloc bdt_tbl[%d].\n", i);
                        goto mem_err;
                }
        }
@@@ -343,7 -318,6 +343,7 @@@ struct qedf_ioreq *qedf_alloc_cmd(struc
        }
        bd_tbl->io_req = io_req;
        io_req->cmd_type = cmd_type;
 +      io_req->tm_flags = 0;
  
        /* Reset sequence offset data */
        io_req->rx_buf_off = 0;
@@@ -362,9 -336,10 +362,9 @@@ static void qedf_free_mp_resc(struct qe
  {
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_ctx *qedf = io_req->fcport->qedf;
 -      uint64_t sz = sizeof(struct fcoe_sge);
 +      uint64_t sz = sizeof(struct scsi_sge);
  
        /* clear tm flags */
 -      mp_req->tm_flags = 0;
        if (mp_req->mp_req_bd) {
                dma_free_coherent(&qedf->pdev->dev, sz,
                    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@@ -412,7 -387,7 +412,7 @@@ void qedf_release_cmd(struct kref *ref
  static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
        int bd_index)
  {
 -      struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
 +      struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int frag_size, sg_frags;
  
        sg_frags = 0;
                        frag_size = sg_len;
                bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
                bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
 -              bd[bd_index + sg_frags].size = (uint16_t)frag_size;
 +              bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
  
                addr += (u64)frag_size;
                sg_frags++;
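
The loop in qedf_split_bd() above walks one large scatter element forward, emitting fixed-size SGE fragments. The core arithmetic as a standalone sketch (the fragment size here is illustrative; the driver uses its own split threshold):

#include <stdint.h>

#define DEMO_BD_SPLIT_SZ 4096   /* illustrative fragment size */

/* Count how many fixed-size fragments a scatter element needs, advancing
 * the address the way qedf_split_bd() does. */
static int demo_split_count(uint64_t addr, int sg_len)
{
	int frags = 0;

	while (sg_len > 0) {
		int frag = sg_len > DEMO_BD_SPLIT_SZ ? DEMO_BD_SPLIT_SZ : sg_len;

		addr += (uint64_t)frag;
		sg_len -= frag;
		frags++;
	}
	return frags;
}
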
@@@ -438,7 -413,7 +438,7 @@@ static int qedf_map_sg(struct qedf_iore
        struct Scsi_Host *host = sc->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
 -      struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
 +      struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int sg_count = 0;
  
                bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
                bd[bd_count].sge_addr.hi = (addr >> 32);
 -              bd[bd_count].size = (u16)sg_len;
 +              bd[bd_count].sge_len = (u16)sg_len;
  
                return ++bd_count;
        }
                        sg_frags = 1;
                        bd[bd_count].sge_addr.lo = U64_LO(addr);
                        bd[bd_count].sge_addr.hi  = U64_HI(addr);
 -                      bd[bd_count].size = (uint16_t)sg_len;
 +                      bd[bd_count].sge_len = (uint16_t)sg_len;
                }
  
                bd_count += sg_frags;
  static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
  {
        struct scsi_cmnd *sc = io_req->sc_cmd;
 -      struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
 +      struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
        int bd_count;
  
        if (scsi_sg_count(sc)) {
        } else {
                bd_count = 0;
                bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
 -              bd[0].size = 0;
 +              bd[0].sge_len = 0;
        }
        io_req->bd_tbl->bd_valid = bd_count;
  
@@@ -554,223 -529,430 +554,223 @@@ static void qedf_build_fcp_cmnd(struct 
  
        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
 -      fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
 +      fcp_cmnd->fc_tm_flags = io_req->tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
        fcp_cmnd->fc_cmdref = 0;
  
        /* Populate data direction */
 -      if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
 -              fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
 -      else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
 +      if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
 +      } else {
 +              if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
 +                      fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
 +              else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
 +                      fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
 +      }
  
        fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
  
        /* 16 bytes: CDB information */
 -      memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
 +      if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
 +              memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
  
        /* 4 bytes: FCP data length */
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
 -
  }
  
  static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 -      struct qedf_ioreq *io_req, u32 *ptu_invalidate,
 -      struct fcoe_task_context *task_ctx)
 +      struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
 +      struct fcoe_wqe *sqe)
  {
        enum fcoe_task_type task_type;
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct io_bdt *bd_tbl = io_req->bd_tbl;
 -      union fcoe_data_desc_ctx *data_desc;
 -      u32 *fcp_cmnd;
 +      u8 fcp_cmnd[32];
        u32 tmp_fcp_cmnd[8];
 -      int cnt, i;
 -      int bd_count;
 +      int bd_count = 0;
        struct qedf_ctx *qedf = fcport->qedf;
        uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
 -      u8 tmp_sgl_mode = 0;
 -      u8 mst_sgl_mode = 0;
 +      struct regpair sense_data_buffer_phys_addr;
 +      u32 tx_io_size = 0;
 +      u32 rx_io_size = 0;
 +      int i, cnt;
  
 -      memset(task_ctx, 0, sizeof(struct fcoe_task_context));
 +      /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
 +      memset(task_ctx, 0, sizeof(struct fcoe_task_context));
 +      memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
 +      memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
  
 -      if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
 -              task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
 -      else
 +      /* Set task type based on the DMA direction of the command */
 +      if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
                task_type = FCOE_TASK_TYPE_READ_INITIATOR;
 -
 -      /* Y Storm context */
 -      task_ctx->ystorm_st_context.expect_first_xfer = 1;
 -      task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
 -      /* Check if this is required */
 -      task_ctx->ystorm_st_context.ox_id = io_req->xid;
 -      task_ctx->ystorm_st_context.task_rety_identifier =
 -          io_req->task_retry_identifier;
 -
 -      /* T Storm ag context */
 -      SET_FIELD(task_ctx->tstorm_ag_context.flags0,
 -          TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
 -      task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
 -
 -      /* T Storm st context */
 -      SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
 -          FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
 -          1);
 -      task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
 -
 -      task_ctx->tstorm_st_context.read_only.dev_type =
 -          FCOE_TASK_DEV_TYPE_DISK;
 -      task_ctx->tstorm_st_context.read_only.conf_supported = 0;
 -      task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
 -
 -      /* Completion queue for response. */
 -      task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
 -      task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
 -          io_req->data_xfer_len;
 -      task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
 -          lport->e_d_tov;
 -
 -      task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
 -      io_req->fp_idx = cq_idx;
 -
 -      bd_count = bd_tbl->bd_valid;
 -      if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
 -              /* Setup WRITE task */
 -              struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
 -
 -              task_ctx->ystorm_st_context.task_type =
 -                  FCOE_TASK_TYPE_WRITE_INITIATOR;
 -              data_desc = &task_ctx->ystorm_st_context.data_desc;
 -
 -              if (io_req->use_slowpath) {
 -                      SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
 -                          YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
 -                          FCOE_SLOW_SGL);
 -                      data_desc->slow.base_sgl_addr.lo =
 -                          U64_LO(bd_tbl->bd_tbl_dma);
 -                      data_desc->slow.base_sgl_addr.hi =
 -                          U64_HI(bd_tbl->bd_tbl_dma);
 -                      data_desc->slow.remainder_num_sges = bd_count;
 -                      data_desc->slow.curr_sge_off = 0;
 -                      data_desc->slow.curr_sgl_index = 0;
 -                      qedf->slow_sge_ios++;
 -                      io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 -              } else {
 -                      SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
 -                          YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
 -                          (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
 -                          FCOE_MUL_FAST_SGES);
 -
 -                      if (bd_count == 1) {
 -                              data_desc->single_sge.sge_addr.lo =
 -                                  fcoe_bd_tbl->sge_addr.lo;
 -                              data_desc->single_sge.sge_addr.hi =
 -                                  fcoe_bd_tbl->sge_addr.hi;
 -                              data_desc->single_sge.size =
 -                                  fcoe_bd_tbl->size;
 -                              data_desc->single_sge.is_valid_sge = 0;
 -                              qedf->single_sge_ios++;
 -                              io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
 -                      } else {
 -                              data_desc->fast.sgl_start_addr.lo =
 -                                  U64_LO(bd_tbl->bd_tbl_dma);
 -                              data_desc->fast.sgl_start_addr.hi =
 -                                  U64_HI(bd_tbl->bd_tbl_dma);
 -                              data_desc->fast.sgl_byte_offset =
 -                                  data_desc->fast.sgl_start_addr.lo &
 -                                  (QEDF_PAGE_SIZE - 1);
 -                              if (data_desc->fast.sgl_byte_offset > 0)
 -                                      QEDF_ERR(&(qedf->dbg_ctx),
 -                                          "byte_offset=%u for xid=0x%x.\n",
 -                                          io_req->xid,
 -                                          data_desc->fast.sgl_byte_offset);
 -                              data_desc->fast.task_reuse_cnt =
 -                                  io_req->reuse_count;
 -                              io_req->reuse_count++;
 -                              if (io_req->reuse_count == QEDF_MAX_REUSE) {
 -                                      *ptu_invalidate = 1;
 -                                      io_req->reuse_count = 0;
 -                              }
 -                              qedf->fast_sge_ios++;
 -                              io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 -                      }
 -              }
 -
 -              /* T Storm context */
 -              task_ctx->tstorm_st_context.read_only.task_type =
 -                  FCOE_TASK_TYPE_WRITE_INITIATOR;
 -
 -              /* M Storm context */
 -              tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
 -                  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
 -              SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
 -                  FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
 -                  tmp_sgl_mode);
 -
        } else {
 -              /* Setup READ task */
 -
 -              /* M Storm context */
 -              struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
 -
 -              data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
 -              task_ctx->mstorm_st_context.fp.data_2_trns_rem =
 -                  io_req->data_xfer_len;
 -
 -              if (io_req->use_slowpath) {
 -                      SET_FIELD(
 -                          task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
 -                          FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
 -                          FCOE_SLOW_SGL);
 -                      data_desc->slow.base_sgl_addr.lo =
 -                          U64_LO(bd_tbl->bd_tbl_dma);
 -                      data_desc->slow.base_sgl_addr.hi =
 -                          U64_HI(bd_tbl->bd_tbl_dma);
 -                      data_desc->slow.remainder_num_sges =
 -                          bd_count;
 -                      data_desc->slow.curr_sge_off = 0;
 -                      data_desc->slow.curr_sgl_index = 0;
 -                      qedf->slow_sge_ios++;
 -                      io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 +              if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 +                      task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
 +                      tx_io_size = io_req->data_xfer_len;
                } else {
 -                      SET_FIELD(
 -                          task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
 -                          FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
 -                          (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
 -                          FCOE_MUL_FAST_SGES);
 -
 -                      if (bd_count == 1) {
 -                              data_desc->single_sge.sge_addr.lo =
 -                                  fcoe_bd_tbl->sge_addr.lo;
 -                              data_desc->single_sge.sge_addr.hi =
 -                                  fcoe_bd_tbl->sge_addr.hi;
 -                              data_desc->single_sge.size =
 -                                  fcoe_bd_tbl->size;
 -                              data_desc->single_sge.is_valid_sge = 0;
 -                              qedf->single_sge_ios++;
 -                              io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
 -                      } else {
 -                              data_desc->fast.sgl_start_addr.lo =
 -                                  U64_LO(bd_tbl->bd_tbl_dma);
 -                              data_desc->fast.sgl_start_addr.hi =
 -                                  U64_HI(bd_tbl->bd_tbl_dma);
 -                              data_desc->fast.sgl_byte_offset = 0;
 -                              data_desc->fast.task_reuse_cnt =
 -                                  io_req->reuse_count;
 -                              io_req->reuse_count++;
 -                              if (io_req->reuse_count == QEDF_MAX_REUSE) {
 -                                      *ptu_invalidate = 1;
 -                                      io_req->reuse_count = 0;
 -                              }
 -                              qedf->fast_sge_ios++;
 -                              io_req->sge_type = QEDF_IOREQ_FAST_SGE;
 -                      }
 +                      task_type = FCOE_TASK_TYPE_READ_INITIATOR;
 +                      rx_io_size = io_req->data_xfer_len;
                }
 -
 -              /* Y Storm context */
 -              task_ctx->ystorm_st_context.expect_first_xfer = 0;
 -              task_ctx->ystorm_st_context.task_type =
 -                  FCOE_TASK_TYPE_READ_INITIATOR;
 -
 -              /* T Storm context */
 -              task_ctx->tstorm_st_context.read_only.task_type =
 -                  FCOE_TASK_TYPE_READ_INITIATOR;
 -              mst_sgl_mode = GET_FIELD(
 -                  task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
 -                  FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
 -              SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
 -                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
 -                  mst_sgl_mode);
        }
  
 +      /* Setup the fields for fcoe_task_params */
 +      io_req->task_params->context = task_ctx;
 +      io_req->task_params->sqe = sqe;
 +      io_req->task_params->task_type = task_type;
 +      io_req->task_params->tx_io_size = tx_io_size;
 +      io_req->task_params->rx_io_size = rx_io_size;
 +      io_req->task_params->conn_cid = fcport->fw_cid;
 +      io_req->task_params->itid = io_req->xid;
 +      io_req->task_params->cq_rss_number = cq_idx;
 +      io_req->task_params->is_tape_device = fcport->dev_type;
 +
 +      /* Fill in information for scatter/gather list */
 +      if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
 +              bd_count = bd_tbl->bd_valid;
 +              io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
 +              io_req->sgl_task_params->sgl_phys_addr.lo =
 +                      U64_LO(bd_tbl->bd_tbl_dma);
 +              io_req->sgl_task_params->sgl_phys_addr.hi =
 +                      U64_HI(bd_tbl->bd_tbl_dma);
 +              io_req->sgl_task_params->num_sges = bd_count;
 +              io_req->sgl_task_params->total_buffer_size =
 +                  scsi_bufflen(io_req->sc_cmd);
 +              io_req->sgl_task_params->small_mid_sge =
 +                      io_req->use_slowpath;
 +      }
 +
 +      /* Fill in physical address of sense buffer */
 +      sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
 +      sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
 +
        /* fill FCP_CMND IU */
 -      fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
 -      qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
 +      qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
  
        /* Swap fcp_cmnd since FC is big endian */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
 -
        for (i = 0; i < cnt; i++) {
 -              *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
 -              fcp_cmnd++;
 +              tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
 +      }
 +      memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
 +
 +      init_initiator_rw_fcoe_task(io_req->task_params,
 +                                  io_req->sgl_task_params,
 +                                  sense_data_buffer_phys_addr,
 +                                  io_req->task_retry_identifier, fcp_cmnd);
 +
 +      /* Increment SGL type counters */
 +      if (bd_count == 1) {
 +              qedf->single_sge_ios++;
 +              io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
 +      } else if (io_req->use_slowpath) {
 +              qedf->slow_sge_ios++;
 +              io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 +      } else {
 +              qedf->fast_sge_ios++;
 +              io_req->sge_type = QEDF_IOREQ_FAST_SGE;
        }
 -
 -      /* M Storm context - Sense buffer */
 -      task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
 -              U64_LO(io_req->sense_buffer_dma);
 -      task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
 -              U64_HI(io_req->sense_buffer_dma);
  }
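
The byte-swap loop above converts the FCP_CMND IU to big endian one 32-bit word at a time before init_initiator_rw_fcoe_task() copies it into the task context. A minimal user-space sketch of the same conversion, assuming a 32-byte command IU as in the driver (the struct layout below is illustrative, not the real struct fcp_cmnd):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for struct fcp_cmnd; the real layout lives in
 * scsi/fc/fc_fcp.h. Sized to 32 bytes like the driver's buffer. */
struct demo_fcp_cmnd {
	uint8_t  lun[8];
	uint8_t  cdb[16];
	uint32_t data_len;
	uint32_t pad;
};

static void demo_fcp_to_wire(const struct demo_fcp_cmnd *cmnd, uint8_t out[32])
{
	uint32_t words[8];
	size_t i;

	memcpy(words, cmnd, sizeof(words));
	/* FC is big endian: swap each 32-bit word, as the driver's loop does */
	for (i = 0; i < 8; i++)
		words[i] = htonl(words[i]);
	memcpy(out, words, sizeof(words));
}
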
  
  void qedf_init_mp_task(struct qedf_ioreq *io_req,
 -      struct fcoe_task_context *task_ctx)
 +      struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
  {
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        struct fc_frame_header *fc_hdr;
 -      enum fcoe_task_type task_type = 0;
 -      union fcoe_data_desc_ctx *data_desc;
 +      struct fcoe_tx_mid_path_params task_fc_hdr;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
  
 -      QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
 -                 "for cmd_type = %d\n", io_req->cmd_type);
 +      QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 +                "Initializing MP task for cmd_type=%d\n",
 +                io_req->cmd_type);
  
        qedf->control_requests++;
  
 -      /* Obtain task_type */
 -      if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
 -          (io_req->cmd_type == QEDF_ELS)) {
 -              task_type = FCOE_TASK_TYPE_MIDPATH;
 -      } else if (io_req->cmd_type == QEDF_ABTS) {
 -              task_type = FCOE_TASK_TYPE_ABTS;
 -      }
 -
 +      memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(task_ctx, 0, sizeof(struct fcoe_task_context));
 +      memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
  
        /* Setup the task from io_req for easy reference */
        io_req->task = task_ctx;
  
 -      QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
 -                 task_type);
 -
 -      /* YSTORM only */
 -      {
 -              /* Initialize YSTORM task context */
 -              struct fcoe_tx_mid_path_params *task_fc_hdr =
 -                  &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
 -              memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 -              task_ctx->ystorm_st_context.task_rety_identifier =
 -                  io_req->task_retry_identifier;
 -
 -              /* Init SGL parameters */
 -              if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
 -                  (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
 -                      data_desc = &task_ctx->ystorm_st_context.data_desc;
 -                      data_desc->slow.base_sgl_addr.lo =
 -                          U64_LO(mp_req->mp_req_bd_dma);
 -                      data_desc->slow.base_sgl_addr.hi =
 -                          U64_HI(mp_req->mp_req_bd_dma);
 -                      data_desc->slow.remainder_num_sges = 1;
 -                      data_desc->slow.curr_sge_off = 0;
 -                      data_desc->slow.curr_sgl_index = 0;
 -              }
 -
 -              fc_hdr = &(mp_req->req_fc_hdr);
 -              if (task_type == FCOE_TASK_TYPE_MIDPATH) {
 -                      fc_hdr->fh_ox_id = io_req->xid;
 -                      fc_hdr->fh_rx_id = htons(0xffff);
 -              } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
 -                      fc_hdr->fh_rx_id = io_req->xid;
 -              }
 +      /* Setup the fields for fcoe_task_params */
 +      io_req->task_params->context = task_ctx;
 +      io_req->task_params->sqe = sqe;
 +      io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
 +      io_req->task_params->tx_io_size = io_req->data_xfer_len;
 +      /* rx_io_size tells the f/w how large a response buffer we have */
 +      io_req->task_params->rx_io_size = PAGE_SIZE;
 +      io_req->task_params->conn_cid = fcport->fw_cid;
 +      io_req->task_params->itid = io_req->xid;
 +      /* Return middle path commands on CQ 0 */
 +      io_req->task_params->cq_rss_number = 0;
 +      io_req->task_params->is_tape_device = fcport->dev_type;
 +
 +      fc_hdr = &(mp_req->req_fc_hdr);
 +      /* Set OX_ID and RX_ID based on driver task id */
 +      fc_hdr->fh_ox_id = io_req->xid;
 +      fc_hdr->fh_rx_id = htons(0xffff);
 +
 +      /* Set up FC header information */
 +      task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
 +      task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
 +      task_fc_hdr.type = fc_hdr->fh_type;
 +      task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
 +      task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
 +      task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
 +      task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
 +
 +      /* Set up s/g list parameters for request buffer */
 +      tx_sgl_task_params.sgl = mp_req->mp_req_bd;
 +      tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
 +      tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
 +      tx_sgl_task_params.num_sges = 1;
 +      /* Total TX buffer size is the length of the request payload */
 +      tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
 +      tx_sgl_task_params.small_mid_sge = 0;
 +
 +      /* Set up s/g list parameters for response buffer */
 +      rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
 +      rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
 +      rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
 +      rx_sgl_task_params.num_sges = 1;
 +      /* Response buffer is a single page, so cap it at PAGE_SIZE for now */
 +      rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
 +      rx_sgl_task_params.small_mid_sge = 0;
  
 -              /* Fill FC Header into middle path buffer */
 -              task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
 -              task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
 -              task_fc_hdr->type = fc_hdr->fh_type;
 -              task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
 -              task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
 -              task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
 -              task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
 -
 -              task_ctx->ystorm_st_context.data_2_trns_rem =
 -                  io_req->data_xfer_len;
 -              task_ctx->ystorm_st_context.task_type = task_type;
 -      }
 -
 -      /* TSTORM ONLY */
 -      {
 -              task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
 -              task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
 -              /* Always send middle-path repsonses on CQ #0 */
 -              task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
 -              io_req->fp_idx = 0;
 -              SET_FIELD(task_ctx->tstorm_ag_context.flags0,
 -                  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
 -                  PROTOCOLID_FCOE);
 -              task_ctx->tstorm_st_context.read_only.task_type = task_type;
 -              SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
 -                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
 -                  1);
 -              task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
 -      }
 -
 -      /* MSTORM only */
 -      {
 -              if (task_type == FCOE_TASK_TYPE_MIDPATH) {
 -                      /* Initialize task context */
 -                      data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
 -
 -                      /* Set cache sges address and length */
 -                      data_desc->slow.base_sgl_addr.lo =
 -                          U64_LO(mp_req->mp_resp_bd_dma);
 -                      data_desc->slow.base_sgl_addr.hi =
 -                          U64_HI(mp_req->mp_resp_bd_dma);
 -                      data_desc->slow.remainder_num_sges = 1;
 -                      data_desc->slow.curr_sge_off = 0;
 -                      data_desc->slow.curr_sgl_index = 0;
  
 -                      /*
 -                       * Also need to fil in non-fastpath response address
 -                       * for middle path commands.
 -                       */
 -                      task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
 -                          U64_LO(mp_req->mp_resp_bd_dma);
 -                      task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
 -                          U64_HI(mp_req->mp_resp_bd_dma);
 -              }
 -      }
 -
 -      /* USTORM ONLY */
 -      {
 -              task_ctx->ustorm_ag_context.global_cq_num = 0;
 -      }
 +      /*
 +       * Last arg is 0 since the previous code never requested the FC
 +       * header information.
 +       */
 +      init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
 +                                                   &task_fc_hdr,
 +                                                   &tx_sgl_task_params,
 +                                                   &rx_sgl_task_params, 0);
  
 -      /* I/O stats. Middle path commands always use slow SGEs */
 -      qedf->slow_sge_ios++;
 -      io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 +      /* Midpath requests always consume 1 SGE */
 +      qedf->single_sge_ios++;
  }
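
Both the request and response buffers above are described by one-element SGLs filled with the same six assignments. A standalone sketch of that setup using stand-in types (the real structures come from the qed firmware headers; the helper and names here are illustrative only):

#include <stdint.h>

/* Minimal stand-ins for the qed structures used above */
struct demo_regpair { uint32_t lo, hi; };
struct demo_sge { struct demo_regpair sge_addr; uint32_t sge_len; };
struct demo_sgl_params {
	struct demo_sge *sgl;
	struct demo_regpair sgl_phys_addr;
	uint16_t num_sges;
	uint32_t total_buffer_size;
	uint8_t  small_mid_sge;
};

/* Hypothetical helper folding the repeated single-SGE setup into one place */
static void demo_fill_single_sgl(struct demo_sgl_params *p,
				 struct demo_sge *sge, uint64_t dma,
				 uint32_t buf_size)
{
	p->sgl = sge;
	p->sgl_phys_addr.lo = (uint32_t)dma;          /* U64_LO() */
	p->sgl_phys_addr.hi = (uint32_t)(dma >> 32);  /* U64_HI() */
	p->num_sges = 1;
	p->total_buffer_size = buf_size;
	p->small_mid_sge = 0;
}
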
  
 -void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
 -      enum fcoe_task_type req_type, u32 offset)
 +/* Assumes fcport->rport_lock is held by the caller */
 +u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
  {
 -      struct fcoe_wqe *sqe;
        uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
 +      u16 rval;
  
 -      sqe = &fcport->sq[fcport->sq_prod_idx];
 +      rval = fcport->sq_prod_idx;
  
 +      /* Adjust ring index */
        fcport->sq_prod_idx++;
        fcport->fw_sq_prod_idx++;
        if (fcport->sq_prod_idx == total_sqe)
                fcport->sq_prod_idx = 0;
  
 -      switch (req_type) {
 -      case FCOE_TASK_TYPE_WRITE_INITIATOR:
 -      case FCOE_TASK_TYPE_READ_INITIATOR:
 -              SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
 -              if (ptu_invalidate)
 -                      SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
 -              break;
 -      case FCOE_TASK_TYPE_MIDPATH:
 -              SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
 -              break;
 -      case FCOE_TASK_TYPE_ABTS:
 -              SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
 -                  SEND_FCOE_ABTS_REQUEST);
 -              break;
 -      case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
 -              SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
 -                   FCOE_EXCHANGE_CLEANUP);
 -              break;
 -      case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
 -              SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
 -                  FCOE_SEQUENCE_RECOVERY);
 -              /* NOTE: offset param only used for sequence recovery */
 -              sqe->additional_info_union.seq_rec_updated_offset = offset;
 -              break;
 -      case FCOE_TASK_TYPE_UNSOLICITED:
 -              break;
 -      default:
 -              break;
 -      }
 -
 -      sqe->task_id = xid;
 -
 -      /* Make sure SQ data is coherent */
 -      wmb();
 -
 +      return rval;
  }
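
qedf_get_sqe_idx() implements the usual producer-index pattern for a fixed-size ring: hand out the current slot, then advance and wrap. Note that only the local index wraps, while the firmware copy keeps counting. A standalone sketch of the same logic (ring size and types are illustrative; the driver derives the size from sq_mem_size):

#include <stdint.h>

#define DEMO_SQ_SIZE 128

struct demo_ring {
	uint16_t prod_idx;     /* wrapping index into the local SQ array */
	uint16_t fw_prod_idx;  /* free-running copy reported via the doorbell */
};

/* Same shape as qedf_get_sqe_idx(): return the current slot, then advance */
static uint16_t demo_get_sqe_idx(struct demo_ring *r)
{
	uint16_t idx = r->prod_idx;

	r->prod_idx++;
	r->fw_prod_idx++;
	if (r->prod_idx == DEMO_SQ_SIZE)
		r->prod_idx = 0;

	return idx;
}
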
  
  void qedf_ring_doorbell(struct qedf_rport *fcport)
@@@ -847,8 -1029,7 +847,8 @@@ int qedf_post_io_req(struct qedf_rport 
        struct fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
 -      u32 ptu_invalidate = 0;
 +      struct fcoe_wqe *sqe;
 +      u16 sqe_idx;
  
        /* Initialize the rest of the io_req fields */
        io_req->data_xfer_len = scsi_bufflen(sc_cmd);
                return -EAGAIN;
        }
  
 +      if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 +              QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
 +              kref_put(&io_req->refcount, qedf_release_cmd);
 +      }
 +
 +      /* Obtain free SQE */
 +      sqe_idx = qedf_get_sqe_idx(fcport);
 +      sqe = &fcport->sq[sqe_idx];
 +      memset(sqe, 0, sizeof(struct fcoe_wqe));
 +
        /* Get the task context */
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        if (!task_ctx) {
                return -EINVAL;
        }
  
 -      qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
 -
 -      if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 -              QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
 -              kref_put(&io_req->refcount, qedf_release_cmd);
 -      }
 -
 -      /* Obtain free SQ entry */
 -      qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
 +      qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
  
        /* Ring doorbell */
        qedf_ring_doorbell(fcport);
@@@ -1163,7 -1342,7 +1163,7 @@@ void qedf_scsi_completion(struct qedf_c
                } else {
                        refcount = kref_read(&io_req->refcount);
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
-                           "%d:0:%d:%d xid=0x%0x op=0x%02x "
+                           "%d:0:%d:%lld xid=0x%0x op=0x%02x "
                            "lba=%02x%02x%02x%02x cdb_status=%d "
                            "fcp_resid=0x%x refcount=%d.\n",
                            qedf->lport->host->host_no, sc_cmd->device->id,
@@@ -1247,7 -1426,7 +1247,7 @@@ void qedf_scsi_done(struct qedf_ctx *qe
  
        sc_cmd->result = result << 16;
        refcount = kref_read(&io_req->refcount);
-       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+       QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
            "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
            "allowed=%d retries=%d refcount=%d.\n",
            qedf->lport->host->host_no, sc_cmd->device->id,
@@@ -1482,8 -1661,6 +1482,8 @@@ int qedf_initiate_abts(struct qedf_iore
        u32 r_a_tov = 0;
        int rc = 0;
        unsigned long flags;
 +      struct fcoe_wqe *sqe;
 +      u16 sqe_idx;
  
        r_a_tov = rdata->r_a_tov;
        lport = qedf->lport;
  
        spin_lock_irqsave(&fcport->rport_lock, flags);
  
 -      /* Add ABTS to send queue */
 -      qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
 +      sqe_idx = qedf_get_sqe_idx(fcport);
 +      sqe = &fcport->sq[sqe_idx];
 +      memset(sqe, 0, sizeof(struct fcoe_wqe));
 +      io_req->task_params->sqe = sqe;
  
 -      /* Ring doorbell */
 +      init_initiator_abort_fcoe_task(io_req->task_params);
        qedf_ring_doorbell(fcport);
  
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
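
The sequence above — take rport_lock, reserve an SQE, fill it, ring the doorbell, drop the lock — is what keeps producer-index updates and doorbell writes ordered between submitters. A user-space analogue of that locking shape (a pthread mutex stands in for the spinlock; names and sizes are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct demo_wqe { uint16_t task_id; uint32_t flags; };

struct demo_port {
	pthread_mutex_t lock;          /* stands in for fcport->rport_lock */
	struct demo_wqe sq[128];
	uint16_t prod_idx;
};

static void demo_submit(struct demo_port *p, uint16_t task_id)
{
	struct demo_wqe *sqe;

	pthread_mutex_lock(&p->lock);
	sqe = &p->sq[p->prod_idx++ % 128];
	memset(sqe, 0, sizeof(*sqe));  /* zero the slot before filling it */
	sqe->task_id = task_id;
	/* the doorbell write would go here, still under the lock */
	pthread_mutex_unlock(&p->lock);
}
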
@@@ -1609,8 -1784,8 +1609,8 @@@ void qedf_process_abts_compl(struct qed
  int qedf_init_mp_req(struct qedf_ioreq *io_req)
  {
        struct qedf_mp_req *mp_req;
 -      struct fcoe_sge *mp_req_bd;
 -      struct fcoe_sge *mp_resp_bd;
 +      struct scsi_sge *mp_req_bd;
 +      struct scsi_sge *mp_resp_bd;
        struct qedf_ctx *qedf = io_req->fcport->qedf;
        dma_addr_t addr;
        uint64_t sz;
        }
  
        /* Allocate and map mp_req_bd and mp_resp_bd */
 -      sz = sizeof(struct fcoe_sge);
 +      sz = sizeof(struct scsi_sge);
        mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
            &mp_req->mp_req_bd_dma, GFP_KERNEL);
        if (!mp_req->mp_req_bd) {
        mp_req_bd = mp_req->mp_req_bd;
        mp_req_bd->sge_addr.lo = U64_LO(addr);
        mp_req_bd->sge_addr.hi = U64_HI(addr);
 -      mp_req_bd->size = QEDF_PAGE_SIZE;
 +      mp_req_bd->sge_len = QEDF_PAGE_SIZE;
  
        /*
         * MP buffer is either a task mgmt command or an ELS.
        addr = mp_req->resp_buf_dma;
        mp_resp_bd->sge_addr.lo = U64_LO(addr);
        mp_resp_bd->sge_addr.hi = U64_HI(addr);
 -      mp_resp_bd->size = QEDF_PAGE_SIZE;
 +      mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
  
        return 0;
  }
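
Every DMA address in these structures is split into 32-bit halves with U64_LO()/U64_HI(), which reduce to a mask and a shift. A standalone equivalent (macro names prefixed to mark them as illustrative):

#include <assert.h>
#include <stdint.h>

#define DEMO_U64_LO(x) ((uint32_t)((x) & 0xffffffff))
#define DEMO_U64_HI(x) ((uint32_t)((x) >> 32))

int main(void)
{
	uint64_t dma = 0x123456789abcdef0ULL;

	/* lo half goes in sge_addr.lo, hi half in sge_addr.hi */
	assert(DEMO_U64_LO(dma) == 0x9abcdef0u);
	assert(DEMO_U64_HI(dma) == 0x12345678u);
	return 0;
}
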
@@@ -1720,8 -1895,6 +1720,8 @@@ int qedf_initiate_cleanup(struct qedf_i
        int tmo = 0;
        int rc = SUCCESS;
        unsigned long flags;
 +      struct fcoe_wqe *sqe;
 +      u16 sqe_idx;
  
        fcport = io_req->fcport;
        if (!fcport) {
  
        init_completion(&io_req->tm_done);
  
 -      /* Obtain free SQ entry */
        spin_lock_irqsave(&fcport->rport_lock, flags);
 -      qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
  
 -      /* Ring doorbell */
 +      sqe_idx = qedf_get_sqe_idx(fcport);
 +      sqe = &fcport->sq[sqe_idx];
 +      memset(sqe, 0, sizeof(struct fcoe_wqe));
 +      io_req->task_params->sqe = sqe;
 +
 +      init_initiator_cleanup_fcoe_task(io_req->task_params);
        qedf_ring_doorbell(fcport);
 +
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
  
        tmo = wait_for_completion_timeout(&io_req->tm_done,
@@@ -1822,15 -1991,16 +1822,15 @@@ static int qedf_execute_tmf(struct qedf
        uint8_t tm_flags)
  {
        struct qedf_ioreq *io_req;
 -      struct qedf_mp_req *tm_req;
        struct fcoe_task_context *task;
 -      struct fc_frame_header *fc_hdr;
 -      struct fcp_cmnd *fcp_cmnd;
        struct qedf_ctx *qedf = fcport->qedf;
 +      struct fc_lport *lport = qedf->lport;
        int rc = 0;
        uint16_t xid;
 -      uint32_t sid, did;
        int tmo = 0;
        unsigned long flags;
 +      struct fcoe_wqe *sqe;
 +      u16 sqe_idx;
  
        if (!sc_cmd) {
                QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
        /* Set the return CPU to be the same as the request one */
        io_req->cpu = smp_processor_id();
  
 -      tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
 -
 -      rc = qedf_init_mp_req(io_req);
 -      if (rc == FAILED) {
 -              QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
 -                        "failed\n");
 -              kref_put(&io_req->refcount, qedf_release_cmd);
 -              goto reset_tmf_err;
 -      }
 -
        /* Set TM flags */
 -      io_req->io_req_flags = 0;
 -      tm_req->tm_flags = tm_flags;
 +      io_req->io_req_flags = QEDF_READ;
 +      io_req->data_xfer_len = 0;
 +      io_req->tm_flags = tm_flags;
  
        /* Default is to return a SCSI command when an error occurs */
        io_req->return_scsi_cmd_on_abts = true;
  
 -      /* Fill FCP_CMND */
 -      qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
 -      fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
 -      memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
 -      fcp_cmnd->fc_dl = 0;
 -
 -      /* Fill FC header */
 -      fc_hdr = &(tm_req->req_fc_hdr);
 -      sid = fcport->sid;
 -      did = fcport->rdata->ids.port_id;
 -      __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
 -                         FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
 -                         FC_FC_SEQ_INIT, 0);
        /* Obtain exchange id */
        xid = io_req->xid;
  
  
        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
 -      qedf_init_mp_task(io_req, task);
  
        init_completion(&io_req->tm_done);
  
 -      /* Obtain free SQ entry */
        spin_lock_irqsave(&fcport->rport_lock, flags);
 -      qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
  
 -      /* Ring doorbell */
 +      sqe_idx = qedf_get_sqe_idx(fcport);
 +      sqe = &fcport->sq[sqe_idx];
 +      memset(sqe, 0, sizeof(struct fcoe_wqe));
 +
 +      qedf_init_task(fcport, lport, io_req, task, sqe);
        qedf_ring_doorbell(fcport);
 +
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
  
        tmo = wait_for_completion_timeout(&io_req->tm_done,
@@@ -1972,6 -2162,14 +1972,6 @@@ void qedf_process_tmf_compl(struct qedf
        struct qedf_ioreq *io_req)
  {
        struct fcoe_cqe_rsp_info *fcp_rsp;
 -      struct fcoe_cqe_midpath_info *mp_info;
 -
 -
 -      /* Get TMF response length from CQE */
 -      mp_info = &cqe->cqe_info.midpath_info;
 -      io_req->mp_req.resp_len = mp_info->data_placement_size;
 -      QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
 -          "Response len is %d.\n", io_req->mp_req.resp_len);
  
        fcp_rsp = &cqe->cqe_info.rsp_info;
        qedf_parse_fcp_rsp(io_req, fcp_rsp);
index eca40b0513a3a997e9f667c72ff3d2520011a292,2bce3efc66a4b4bda8bae40768ca3e961a6d33b0..d6978cbc56f0586aa8a075191433184c50c93b01
@@@ -14,8 -14,6 +14,8 @@@
  #include "qedi.h"
  #include "qedi_iscsi.h"
  #include "qedi_gbl.h"
 +#include "qedi_fw_iscsi.h"
 +#include "qedi_fw_scsi.h"
  
  static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                               struct iscsi_task *mtask);
@@@ -55,8 -53,8 +55,8 @@@ static void qedi_process_logout_resp(st
        resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
        resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
  
 -      resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
 -      resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
 +      resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
 +      resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
  
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
                  "Freeing tid=0x%x for cid=0x%x\n",
@@@ -977,6 -975,81 +977,6 @@@ exit_fp_process
        return;
  }
  
 -static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
 -                         u16 tid, uint16_t ptu_invalidate, int is_cleanup)
 -{
 -      struct iscsi_wqe *wqe;
 -      struct iscsi_wqe_field *cont_field;
 -      struct qedi_endpoint *ep;
 -      struct scsi_cmnd *sc = task->sc;
 -      struct iscsi_login_req *login_hdr;
 -      struct qedi_cmd *cmd = task->dd_data;
 -
 -      login_hdr = (struct iscsi_login_req *)task->hdr;
 -      ep = qedi_conn->ep;
 -      wqe = &ep->sq[ep->sq_prod_idx];
 -
 -      memset(wqe, 0, sizeof(*wqe));
 -
 -      ep->sq_prod_idx++;
 -      ep->fw_sq_prod_idx++;
 -      if (ep->sq_prod_idx == QEDI_SQ_SIZE)
 -              ep->sq_prod_idx = 0;
 -
 -      if (is_cleanup) {
 -              SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
 -                        ISCSI_WQE_TYPE_TASK_CLEANUP);
 -              wqe->task_id = tid;
 -              return;
 -      }
 -
 -      if (ptu_invalidate) {
 -              SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
 -                        ISCSI_WQE_SET_PTU_INVALIDATE);
 -      }
 -
 -      cont_field = &wqe->cont_prevtid_union.cont_field;
 -
 -      switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 -      case ISCSI_OP_LOGIN:
 -      case ISCSI_OP_TEXT:
 -              SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
 -                        ISCSI_WQE_TYPE_MIDDLE_PATH);
 -              SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
 -                        1);
 -              cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
 -              break;
 -      case ISCSI_OP_LOGOUT:
 -      case ISCSI_OP_NOOP_OUT:
 -      case ISCSI_OP_SCSI_TMFUNC:
 -               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
 -                         ISCSI_WQE_TYPE_NORMAL);
 -              break;
 -      default:
 -              if (!sc)
 -                      break;
 -
 -              SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
 -                        ISCSI_WQE_TYPE_NORMAL);
 -              cont_field->contlen_cdbsize_field =
 -                              (sc->sc_data_direction == DMA_TO_DEVICE) ?
 -                              scsi_bufflen(sc) : 0;
 -              if (cmd->use_slowpath)
 -                      SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
 -              else
 -                      SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
 -                                (sc->sc_data_direction ==
 -                                 DMA_TO_DEVICE) ?
 -                                min((u16)QEDI_FAST_SGE_COUNT,
 -                                    (u16)cmd->io_tbl.sge_valid) : 0);
 -              break;
 -      }
 -
 -      wqe->task_id = tid;
 -      /* Make sure SQ data is coherent */
 -      wmb();
 -}
 -
  static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
  {
        struct iscsi_db_data dbell = { 0 };
                  qedi_conn->iscsi_conn_id);
  }
  
 +static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
 +{
 +      struct qedi_endpoint *ep;
 +      u16 rval;
 +
 +      ep = qedi_conn->ep;
 +      rval = ep->sq_prod_idx;
 +
 +      /* Increment SQ index */
 +      ep->sq_prod_idx++;
 +      ep->fw_sq_prod_idx++;
 +      if (ep->sq_prod_idx == QEDI_SQ_SIZE)
 +              ep->sq_prod_idx = 0;
 +
 +      return rval;
 +}
 +
  int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
                          struct iscsi_task *task)
  {
 -      struct qedi_ctx *qedi = qedi_conn->qedi;
 +      struct iscsi_login_req_hdr login_req_pdu_header;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
 +      struct iscsi_task_params task_params;
        struct iscsi_task_context *fw_task_ctx;
 +      struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_login_req *login_hdr;
 -      struct iscsi_login_req_hdr *fw_login_req = NULL;
 -      struct iscsi_cached_sge_ctx *cached_sge = NULL;
 -      struct iscsi_sge *single_sge = NULL;
 -      struct iscsi_sge *req_sge = NULL;
 -      struct iscsi_sge *resp_sge = NULL;
 +      struct scsi_sge *req_sge = NULL;
 +      struct scsi_sge *resp_sge = NULL;
        struct qedi_cmd *qedi_cmd;
 -      s16 ptu_invalidate = 0;
 +      struct qedi_endpoint *ep;
        s16 tid = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
 -      req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 -      resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
 +      ep = qedi_conn->ep;
        login_hdr = (struct iscsi_login_req *)task->hdr;
  
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
  
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
 -      /* Ystorm context */
 -      fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
 -      fw_login_req->opcode = login_hdr->opcode;
 -      fw_login_req->version_min = login_hdr->min_version;
 -      fw_login_req->version_max = login_hdr->max_version;
 -      fw_login_req->flags_attr = login_hdr->flags;
 -      fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
 -      fw_login_req->isid_d = *((u32 *)login_hdr->isid);
 -      fw_login_req->tsih = login_hdr->tsih;
 -      qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 -      fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
 -      fw_login_req->cid = qedi_conn->iscsi_conn_id;
 -      fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
 -      fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
 -      fw_login_req->exp_stat_sn = 0;
 -
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
 +      memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 +      /* Update header info */
 +      login_req_pdu_header.opcode = login_hdr->opcode;
 +      login_req_pdu_header.version_min = login_hdr->min_version;
 +      login_req_pdu_header.version_max = login_hdr->max_version;
 +      login_req_pdu_header.flags_attr = login_hdr->flags;
 +      login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
 +      login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
 +
 +      login_req_pdu_header.tsih = login_hdr->tsih;
 +      login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
  
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                              qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                              qedi->tid_reuse_count[tid]++;
 -      cached_sge =
 -             &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
 -      cached_sge->sge.sge_len = req_sge->sge_len;
 -      cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
 -      cached_sge->sge.sge_addr.hi =
 -                           (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 -
 -      /* Mstorm context */
 -      single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
 -      fw_task_ctx->mstorm_st_context.task_type = 0x2;
 -      fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
 -      single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
 -      single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
 -      single_sge->sge_len = resp_sge->sge_len;
 -
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SINGLE_SGE, 1);
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SLOW_IO, 0);
 -      fw_task_ctx->mstorm_st_context.sgl_size = 1;
 -      fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
 -
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
 -                                              ntoh24(login_hdr->dlength);
 -      fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
 -      fw_task_ctx->ustorm_st_context.task_type = 0x2;
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -      fw_task_ctx->ustorm_ag_context.exp_data_acked =
 -                                               ntoh24(login_hdr->dlength);
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
 -                USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
 +      qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 +      login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
 +      login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
 +      login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
 +      login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
 +      login_req_pdu_header.exp_stat_sn = 0;
 +
 +      /* Fill tx AHS and rx buffer */
 +      tx_sgl_task_params.sgl =
 +                             (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      tx_sgl_task_params.sgl_phys_addr.lo =
 +                                       (u32)(qedi_conn->gen_pdu.req_dma_addr);
 +      tx_sgl_task_params.sgl_phys_addr.hi =
 +                            (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 +      tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
 +      tx_sgl_task_params.num_sges = 1;
 +
 +      rx_sgl_task_params.sgl =
 +                            (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      rx_sgl_task_params.sgl_phys_addr.lo =
 +                                      (u32)(qedi_conn->gen_pdu.resp_dma_addr);
 +      rx_sgl_task_params.sgl_phys_addr.hi =
 +                           (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
 +      rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
 +      rx_sgl_task_params.num_sges = 1;
 +
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = 0;
 +      task_params.tx_io_size = ntoh24(login_hdr->dlength);
 +      task_params.rx_io_size = resp_sge->sge_len;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +      rval = init_initiator_login_request_task(&task_params,
 +                                               &login_req_pdu_header,
 +                                               &tx_sgl_task_params,
 +                                               &rx_sgl_task_params);
 +      if (rval)
 +              return -1;
  
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
  
 -      qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
        return 0;
  }
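
The login path above packs the 6-byte iSCSI ISID into the PDU header by byte-swapping its first four bytes into isid_tabc and the last two into isid_d. A standalone equivalent of those swab32p()/swab16p() calls (GCC/Clang byte-swap builtins stand in for the kernel helpers):

#include <stdint.h>
#include <string.h>

/* Split a 6-byte ISID the way the login path above does */
static void demo_pack_isid(const uint8_t isid[6],
			   uint32_t *isid_tabc, uint16_t *isid_d)
{
	uint32_t hi;
	uint16_t lo;

	memcpy(&hi, isid, sizeof(hi));
	memcpy(&lo, isid + 4, sizeof(lo));
	*isid_tabc = __builtin_bswap32(hi);  /* swab32p() */
	*isid_d    = __builtin_bswap16(lo);  /* swab16p() */
}
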
  int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
                           struct iscsi_task *task)
  {
 -      struct qedi_ctx *qedi = qedi_conn->qedi;
 -      struct iscsi_logout_req_hdr *fw_logout_req = NULL;
 -      struct iscsi_task_context *fw_task_ctx = NULL;
 +      struct iscsi_logout_req_hdr logout_pdu_header;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
 +      struct iscsi_task_params task_params;
 +      struct iscsi_task_context *fw_task_ctx;
        struct iscsi_logout *logout_hdr = NULL;
 -      struct qedi_cmd *qedi_cmd = NULL;
 -      s16  tid = 0;
 -      s16 ptu_invalidate = 0;
 +      struct qedi_ctx *qedi = qedi_conn->qedi;
 +      struct qedi_cmd *qedi_cmd;
 +      struct qedi_endpoint *ep;
 +      s16 tid = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        logout_hdr = (struct iscsi_logout *)task->hdr;
 +      ep = qedi_conn->ep;
  
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
  
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 -
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +
        qedi_cmd->task_id = tid;
  
 -      /* Ystorm context */
 -      fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
 -      fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
 -      fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
 -      qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 -      fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
 -      fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
 -      fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
 +      memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
  
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                                qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                              qedi->tid_reuse_count[tid]++;
 -      fw_logout_req->cid = qedi_conn->iscsi_conn_id;
 -      fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
 -
 -      /* Mstorm context */
 -      fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
 -
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
 -      fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
 -      fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
 -
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
 -                USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                ISCSI_REG1_NUM_FAST_SGES, 0);
 -
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 +      /* Update header info */
 +      logout_pdu_header.opcode = logout_hdr->opcode;
 +      logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
 +      qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 +      logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
 +      logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
 +      logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
 +      logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
 +
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = 0;
 +      task_params.tx_io_size = 0;
 +      task_params.rx_io_size = 0;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +
 +      rval = init_initiator_logout_request_task(&task_params,
 +                                                &logout_pdu_header,
 +                                                NULL, NULL);
 +      if (rval)
 +              return -1;
  
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
  
 -      qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
 -
        return 0;
  }
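The rewritten logout sender above shows the shape every midpath PDU takes after this refactor: zero the parameter blocks, fill the PDU header, fill the firmware input params, claim and zero a send-queue entry, call the init_*_task() helper, then queue the command and ring the doorbell. The same pattern repeats in the TMF, text and NOP-Out senders below. A minimal userspace model of that flow follows; every type and helper name in it is a hypothetical stand-in, not the driver's real API:

/*
 * Reduced, userspace-compilable model of the new midpath send flow.
 * All structs and functions here are hypothetical stand-ins for the
 * driver's task_params / init_*_task() machinery.
 */
#include <stdint.h>
#include <string.h>

struct sketch_task_params {
	void	*context;	/* firmware task context memory */
	void	*sqe;		/* claimed send-queue entry     */
	uint16_t conn_icid;	/* connection id                */
	uint16_t itid;		/* task id                      */
	uint32_t tx_io_size;
	uint32_t rx_io_size;
};

/* stand-in for init_initiator_logout_request_task() and friends */
static int sketch_init_task(const struct sketch_task_params *p)
{
	return (p->context && p->sqe) ? 0 : -1;
}

static int sketch_send_midpath(void *fw_ctx, void *sq_slot,
			       size_t sqe_size, uint16_t icid, uint16_t tid)
{
	struct sketch_task_params p;

	memset(&p, 0, sizeof(p));	/* 1. zero every input block     */
	p.context = fw_ctx;		/* 2. fill the fw input params   */
	p.conn_icid = icid;
	p.itid = tid;
	p.sqe = sq_slot;		/* 3. claim and zero an SQ slot  */
	memset(p.sqe, 0, sqe_size);
	if (sketch_init_task(&p))	/* 4. helper encodes the context */
		return -1;
	return 0;			/* 5. caller rings the doorbell  */
}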
  
@@@ -1404,9 -1461,9 +1404,9 @@@ static void qedi_tmf_work(struct work_s
                  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
                  qedi_conn->iscsi_conn_id);
  
-       if (do_not_recover) {
+       if (qedi_do_not_recover) {
                QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
-                        do_not_recover);
+                        qedi_do_not_recover);
                goto abort_ret;
        }
  
@@@ -1476,46 -1533,47 +1476,46 @@@ ldel_exit
  static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                               struct iscsi_task *mtask)
  {
 -      struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 +      struct iscsi_tmf_request_hdr tmf_pdu_header;
 +      struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_task_context *fw_task_ctx;
 -      struct iscsi_tmf_request_hdr *fw_tmf_request;
 -      struct iscsi_sge *single_sge;
 -      struct qedi_cmd *qedi_cmd;
 -      struct qedi_cmd *cmd;
 +      struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_task *ctask;
        struct iscsi_tm *tmf_hdr;
 -      struct iscsi_sge *req_sge;
 -      struct iscsi_sge *resp_sge;
 -      u32 lun[2];
 -      s16 tid = 0, ptu_invalidate = 0;
 +      struct qedi_cmd *qedi_cmd;
 +      struct qedi_cmd *cmd;
 +      struct qedi_endpoint *ep;
 +      u32 scsi_lun[2];
 +      s16 tid = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
 -      req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 -      resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 -      qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
        tmf_hdr = (struct iscsi_tm *)mtask->hdr;
 +      qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
 +      ep = qedi_conn->ep;
  
 -      tid = qedi_cmd->task_id;
 -      qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
 +      tid = qedi_get_task_idx(qedi);
 +      if (tid == -1)
 +              return -ENOMEM;
  
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
  
 -      fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
 -      fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
 -      fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
 +      qedi_cmd->task_id = tid;
  
 -      memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
 -      fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
 -      fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
  
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                              qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                              qedi->tid_reuse_count[tid]++;
 +      /* Update header info */
 +      qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
 +      tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
 +      tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
 +
 +      memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
 +      tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
 +      tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
  
        if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
             ISCSI_TM_FUNC_ABORT_TASK) {
                        return 0;
                }
                cmd = (struct qedi_cmd *)ctask->dd_data;
 -              fw_tmf_request->rtt =
 +              tmf_pdu_header.rtt =
                                qedi_set_itt(cmd->task_id,
                                             get_itt(tmf_hdr->rtt));
        } else {
 -              fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
 +              tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
        }
  
 -      fw_tmf_request->opcode = tmf_hdr->opcode;
 -      fw_tmf_request->function = tmf_hdr->flags;
 -      fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
 -      fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
 -
 -      single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
 -      fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
 -      single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
 -      single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
 -      single_sge->sge_len = resp_sge->sge_len;
 -
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SINGLE_SGE, 1);
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SLOW_IO, 0);
 -      fw_task_ctx->mstorm_st_context.sgl_size = 1;
 -      fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
 -
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
 -      fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
 -      fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
 -
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
 -                USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                ISCSI_REG1_NUM_FAST_SGES, 0);
 -
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 -      fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
 -      fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
 +      tmf_pdu_header.opcode = tmf_hdr->opcode;
 +      tmf_pdu_header.function = tmf_hdr->flags;
 +      tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
 +      tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
  
 -      QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 -                "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
 -                tid,  mtask->itt, qedi_conn->iscsi_conn_id);
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = 0;
 +      task_params.tx_io_size = 0;
 +      task_params.rx_io_size = 0;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +      rval = init_initiator_tmf_request_task(&task_params,
 +                                             &tmf_pdu_header);
 +      if (rval)
 +              return -1;
  
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
  
 -      qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
        return 0;
  }
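The TMF hunk also shows how LUNs are handed to the firmware: the 8-byte big-endian wire LUN is memcpy()d into a u32[2] and each half is byte-swapped into host order. A sketch of the same packing in portable C, using glibc's be32toh() from <endian.h> as a stand-in for the kernel's be32_to_cpu():

/*
 * Sketch only: the firmware wants the 8-byte iSCSI LUN as two
 * host-endian 32-bit words (lo = first four wire bytes, hi = last four).
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

struct sketch_lun_words { uint32_t lo, hi; };

static struct sketch_lun_words sketch_pack_lun(const uint8_t wire_lun[8])
{
	uint32_t w[2];
	struct sketch_lun_words out;

	memcpy(w, wire_lun, sizeof(w));   /* wire order: big endian */
	out.lo = be32toh(w[0]);           /* first 4 bytes -> .lo   */
	out.hi = be32toh(w[1]);           /* last 4 bytes  -> .hi   */
	return out;
}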
@@@ -1611,98 -1689,101 +1611,98 @@@ int qedi_iscsi_abort_work(struct qedi_c
  int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
                         struct iscsi_task *task)
  {
 -      struct qedi_ctx *qedi = qedi_conn->qedi;
 +      struct iscsi_text_request_hdr text_request_pdu_header;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
 +      struct iscsi_task_params task_params;
        struct iscsi_task_context *fw_task_ctx;
 -      struct iscsi_text_request_hdr *fw_text_request;
 -      struct iscsi_cached_sge_ctx *cached_sge;
 -      struct iscsi_sge *single_sge;
 -      struct qedi_cmd *qedi_cmd;
 -      /* For 6.5 hdr iscsi_hdr */
 +      struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_text *text_hdr;
 -      struct iscsi_sge *req_sge;
 -      struct iscsi_sge *resp_sge;
 -      s16 ptu_invalidate = 0;
 +      struct scsi_sge *req_sge = NULL;
 +      struct scsi_sge *resp_sge = NULL;
 +      struct qedi_cmd *qedi_cmd;
 +      struct qedi_endpoint *ep;
        s16 tid = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
 -      req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 -      resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        text_hdr = (struct iscsi_text *)task->hdr;
 +      ep = qedi_conn->ep;
  
        tid = qedi_get_task_idx(qedi);
        if (tid == -1)
                return -ENOMEM;
  
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
 -      /* Ystorm context */
 -      fw_text_request =
 -                      &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
 -      fw_text_request->opcode = text_hdr->opcode;
 -      fw_text_request->flags_attr = text_hdr->flags;
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
 +      memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 +
 +      /* Update header info */
 +      text_request_pdu_header.opcode = text_hdr->opcode;
 +      text_request_pdu_header.flags_attr = text_hdr->flags;
  
        qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 -      fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
 -      fw_text_request->ttt = text_hdr->ttt;
 -      fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
 -      fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
 -      fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
 -
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                                   qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                                 qedi->tid_reuse_count[tid]++;
 -
 -      cached_sge =
 -             &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
 -      cached_sge->sge.sge_len = req_sge->sge_len;
 -      cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
 -      cached_sge->sge.sge_addr.hi =
 +      text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
 +      text_request_pdu_header.ttt = text_hdr->ttt;
 +      text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
 +      text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
 +      text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
 +
 +      /* Fill tx AHS and rx buffer */
 +      tx_sgl_task_params.sgl =
 +                             (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      tx_sgl_task_params.sgl_phys_addr.lo =
 +                                       (u32)(qedi_conn->gen_pdu.req_dma_addr);
 +      tx_sgl_task_params.sgl_phys_addr.hi =
                              (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 +      tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
 +      tx_sgl_task_params.num_sges = 1;
 +
 +      rx_sgl_task_params.sgl =
 +                            (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      rx_sgl_task_params.sgl_phys_addr.lo =
 +                                      (u32)(qedi_conn->gen_pdu.resp_dma_addr);
 +      rx_sgl_task_params.sgl_phys_addr.hi =
 +                           (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
 +      rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
 +      rx_sgl_task_params.num_sges = 1;
 +
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = 0;
 +      task_params.tx_io_size = ntoh24(text_hdr->dlength);
 +      task_params.rx_io_size = resp_sge->sge_len;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +      rval = init_initiator_text_request_task(&task_params,
 +                                              &text_request_pdu_header,
 +                                              &tx_sgl_task_params,
 +                                              &rx_sgl_task_params);
 +      if (rval)
 +              return -1;
  
 -      /* Mstorm context */
 -      single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
 -      fw_task_ctx->mstorm_st_context.task_type = 0x2;
 -      fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
 -      single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
 -      single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
 -      single_sge->sge_len = resp_sge->sge_len;
 -
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SINGLE_SGE, 1);
 -      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                ISCSI_MFLAGS_SLOW_IO, 0);
 -      fw_task_ctx->mstorm_st_context.sgl_size = 1;
 -      fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
 -
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_ag_context.exp_data_acked =
 -                                                    ntoh24(text_hdr->dlength);
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
 -                                                    ntoh24(text_hdr->dlength);
 -      fw_task_ctx->ustorm_st_context.exp_data_sn =
 -                                            be32_to_cpu(text_hdr->exp_statsn);
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
 -      fw_task_ctx->ustorm_st_context.task_type = 0x2;
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 -
 -      /*  Add command in active command list */
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
        qedi_cmd->io_cmd_in_list = true;
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
  
 -      qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
 -
        return 0;
  }
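The SGL parameter blocks above repeatedly split a 64-bit DMA address into a pair of 32-bit registers. A one-function sketch of that split, assuming nothing beyond stdint:

/*
 * Sketch: sgl_phys_addr is two u32 fields, so a 64-bit DMA address is
 * truncated for .lo and shifted for .hi.
 */
#include <stdint.h>

struct sketch_regpair { uint32_t lo, hi; };

static struct sketch_regpair sketch_split_dma_addr(uint64_t dma_addr)
{
	struct sketch_regpair r;

	r.lo = (uint32_t)dma_addr;          /* low 32 bits  */
	r.hi = (uint32_t)(dma_addr >> 32);  /* high 32 bits */
	return r;
}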
  
@@@ -1710,62 -1791,58 +1710,62 @@@ int qedi_send_iscsi_nopout(struct qedi_
                           struct iscsi_task *task,
                           char *datap, int data_len, int unsol)
  {
 +      struct iscsi_nop_out_hdr nop_out_pdu_header;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
 +      struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_task_context *fw_task_ctx;
 -      struct iscsi_nop_out_hdr *fw_nop_out;
 -      struct qedi_cmd *qedi_cmd;
 -      /* For 6.5 hdr iscsi_hdr */
        struct iscsi_nopout *nopout_hdr;
 -      struct iscsi_cached_sge_ctx *cached_sge;
 -      struct iscsi_sge *single_sge;
 -      struct iscsi_sge *req_sge;
 -      struct iscsi_sge *resp_sge;
 -      u32 lun[2];
 -      s16 ptu_invalidate = 0;
 +      struct scsi_sge *req_sge = NULL;
 +      struct scsi_sge *resp_sge = NULL;
 +      struct qedi_cmd *qedi_cmd;
 +      struct qedi_endpoint *ep;
 +      u32 scsi_lun[2];
        s16 tid = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
 -      req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 -      resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
        qedi_cmd = (struct qedi_cmd *)task->dd_data;
        nopout_hdr = (struct iscsi_nopout *)task->hdr;
 +      ep = qedi_conn->ep;
  
        tid = qedi_get_task_idx(qedi);
 -      if (tid == -1) {
 -              QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
 +      if (tid == -1)
                return -ENOMEM;
 -      }
 -
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
  
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +
        qedi_cmd->task_id = tid;
  
 -      /* Ystorm context */
 -      fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
 -      SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
 -      SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
 +      memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 +
 +      /* Update header info */
 +      nop_out_pdu_header.opcode = nopout_hdr->opcode;
 +      SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
 +      SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
  
 -      memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
 -      fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
 -      fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
 +      memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
 +      nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
 +      nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
 +      nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
 +      nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
  
        qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
  
        if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
 -              fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
 -              fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
 -              fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
 -              fw_task_ctx->ystorm_st_context.state.local_comp = 1;
 -              SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
 -                        USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
 +              nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
 +              nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
        } else {
 -              fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
 -              fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
 -              fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
 +              nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
 +              nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
  
                spin_lock(&qedi_conn->list_lock);
                list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
                spin_unlock(&qedi_conn->list_lock);
        }
  
 -      fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
 -      fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
 -      fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
 -
 -      cached_sge =
 -             &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
 -      cached_sge->sge.sge_len = req_sge->sge_len;
 -      cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
 -      cached_sge->sge.sge_addr.hi =
 -                      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 -
 -      /* Mstorm context */
 -      fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
 -
 -      single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
 -      single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
 -      single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
 -      single_sge->sge_len = resp_sge->sge_len;
 -      fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
 -
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                              qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                              qedi->tid_reuse_count[tid]++;
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
 -      fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
 -      fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
 -
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                ISCSI_REG1_NUM_FAST_SGES, 0);
 -
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 -
 -      fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
 -      fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
 -
 -      qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
 +      /* Fill tx AHS and rx buffer */
 +      if (data_len) {
 +              tx_sgl_task_params.sgl =
 +                             (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +              tx_sgl_task_params.sgl_phys_addr.lo =
 +                                       (u32)(qedi_conn->gen_pdu.req_dma_addr);
 +              tx_sgl_task_params.sgl_phys_addr.hi =
 +                            (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 +              tx_sgl_task_params.total_buffer_size = data_len;
 +              tx_sgl_task_params.num_sges = 1;
 +
 +              rx_sgl_task_params.sgl =
 +                            (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 +              rx_sgl_task_params.sgl_phys_addr.lo =
 +                                      (u32)(qedi_conn->gen_pdu.resp_dma_addr);
 +              rx_sgl_task_params.sgl_phys_addr.hi =
 +                           (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
 +              rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
 +              rx_sgl_task_params.num_sges = 1;
 +      }
 +
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = 0;
 +      task_params.tx_io_size = data_len;
 +      task_params.rx_io_size = resp_sge->sge_len;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +      rval = init_initiator_nop_out_task(&task_params,
 +                                         &nop_out_pdu_header,
 +                                         &tx_sgl_task_params,
 +                                         &rx_sgl_task_params);
 +      if (rval)
 +              return -1;
 +
        qedi_ring_doorbell(qedi_conn);
        return 0;
  }
  static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
                         int bd_index)
  {
 -      struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
 +      struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
        int frag_size, sg_frags;
  
        sg_frags = 0;
  static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
  {
        struct scsi_cmnd *sc = cmd->scsi_cmd;
 -      struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
 +      struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int bd_count = 0;
@@@ -1956,7 -2040,7 +1956,7 @@@ static void qedi_iscsi_map_sg_list(stru
                if (bd_count == 0)
                        return;
        } else {
 -              struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
 +              struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
  
                bd[0].sge_addr.lo = 0;
                bd[0].sge_addr.hi = 0;
@@@ -2052,182 -2136,244 +2052,182 @@@ int qedi_iscsi_send_ioreq(struct iscsi_
        struct qedi_conn *qedi_conn = conn->dd_data;
        struct qedi_cmd *cmd = task->dd_data;
        struct scsi_cmnd *sc = task->sc;
 +      struct iscsi_cmd_hdr cmd_pdu_header;
 +      struct scsi_sgl_task_params tx_sgl_task_params;
 +      struct scsi_sgl_task_params rx_sgl_task_params;
 +      struct scsi_sgl_task_params *prx_sgl = NULL;
 +      struct scsi_sgl_task_params *ptx_sgl = NULL;
 +      struct iscsi_task_params task_params;
 +      struct iscsi_conn_params conn_params;
 +      struct scsi_initiator_cmd_params cmd_params;
        struct iscsi_task_context *fw_task_ctx;
 -      struct iscsi_cached_sge_ctx *cached_sge;
 -      struct iscsi_phys_sgl_ctx *phys_sgl;
 -      struct iscsi_virt_sgl_ctx *virt_sgl;
 -      struct ystorm_iscsi_task_st_ctx *yst_cxt;
 -      struct mstorm_iscsi_task_st_ctx *mst_cxt;
 -      struct iscsi_sgl *sgl_struct;
 -      struct iscsi_sge *single_sge;
 +      struct iscsi_cls_conn *cls_conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
 -      struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
 -      enum iscsi_task_type task_type;
 -      struct iscsi_cmd_hdr *fw_cmd;
 -      u32 lun[2];
 -      u32 exp_data;
 -      u16 cq_idx = smp_processor_id() % qedi->num_queues;
 -      s16 ptu_invalidate = 0;
 +      enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
 +      struct qedi_endpoint *ep;
 +      u32 scsi_lun[2];
        s16 tid = 0;
 -      u8 num_fast_sgs;
 +      u16 sq_idx = 0;
 +      u16 cq_idx;
 +      int rval = 0;
  
 -      tid = qedi_get_task_idx(qedi);
 -      if (tid == -1)
 -              return -ENOMEM;
 +      ep = qedi_conn->ep;
 +      cls_conn = qedi_conn->cls_conn;
 +      conn = cls_conn->dd_data;
  
        qedi_iscsi_map_sg_list(cmd);
 +      int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
  
 -      int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
 -      fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 +      tid = qedi_get_task_idx(qedi);
 +      if (tid == -1)
 +              return -ENOMEM;
  
 +      fw_task_ctx =
 +           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
        memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 -      cmd->task_id = tid;
  
 -      /* Ystorm context */
 -      fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
 -      SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
 +      cmd->task_id = tid;
  
 +      memset(&task_params, 0, sizeof(task_params));
 +      memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
 +      memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
 +      memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 +      memset(&conn_params, 0, sizeof(conn_params));
 +      memset(&cmd_params, 0, sizeof(cmd_params));
 +
 +      cq_idx = smp_processor_id() % qedi->num_queues;
 +      /* Update header info */
 +      SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
 +                ISCSI_ATTR_SIMPLE);
        if (sc->sc_data_direction == DMA_TO_DEVICE) {
 -              if (conn->session->initial_r2t_en) {
 -                      exp_data = min((conn->session->imm_data_en *
 -                                      conn->max_xmit_dlength),
 -                                     conn->session->first_burst);
 -                      exp_data = min(exp_data, scsi_bufflen(sc));
 -                      fw_task_ctx->ustorm_ag_context.exp_data_acked =
 -                                                        cpu_to_le32(exp_data);
 -              } else {
 -                      fw_task_ctx->ustorm_ag_context.exp_data_acked =
 -                            min(conn->session->first_burst, scsi_bufflen(sc));
 -              }
 -
 -              SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
 +              SET_FIELD(cmd_pdu_header.flags_attr,
 +                        ISCSI_CMD_HDR_WRITE, 1);
                task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
        } else {
 -              if (scsi_bufflen(sc))
 -                      SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
 +              SET_FIELD(cmd_pdu_header.flags_attr,
 +                        ISCSI_CMD_HDR_READ, 1);
                task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
        }
  
 -      fw_cmd->lun.lo = be32_to_cpu(lun[0]);
 -      fw_cmd->lun.hi = be32_to_cpu(lun[1]);
 +      cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
 +      cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
  
        qedi_update_itt_map(qedi, tid, task->itt, cmd);
 -      fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
 -      fw_cmd->expected_transfer_length = scsi_bufflen(sc);
 -      fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 -      fw_cmd->opcode = hdr->opcode;
 -      qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
 -
 -      /* Mstorm context */
 -      fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
 -      fw_task_ctx->mstorm_st_context.sense_db.hi =
 -                                      (u32)((u64)cmd->sense_buffer_dma >> 32);
 -      fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
 -      fw_task_ctx->mstorm_st_context.task_type = task_type;
 -
 -      if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
 -              ptu_invalidate = 1;
 -              qedi->tid_reuse_count[tid] = 0;
 -      }
 -      fw_task_ctx->ystorm_st_context.state.reuse_count =
 -                                                   qedi->tid_reuse_count[tid];
 -      fw_task_ctx->mstorm_st_context.reuse_count =
 -                                                 qedi->tid_reuse_count[tid]++;
 -
 -      /* Ustorm context */
 -      fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
 -      fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
 -      fw_task_ctx->ustorm_st_context.exp_data_sn =
 -                                                 be32_to_cpu(hdr->exp_statsn);
 -      fw_task_ctx->ustorm_st_context.task_type = task_type;
 -      fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
 -      fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
 -
 -      SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
 -                USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
 -                USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
 -
 -      num_fast_sgs = (cmd->io_tbl.sge_valid ?
 -                      min((u16)QEDI_FAST_SGE_COUNT,
 -                          (u16)cmd->io_tbl.sge_valid) : 0);
 -      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
 -
 -      fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
 -      fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
 -
 -      QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
 -                cmd->io_tbl.sge_valid);
 -
 -      yst_cxt = &fw_task_ctx->ystorm_st_context;
 -      mst_cxt = &fw_task_ctx->mstorm_st_context;
 -      /* Tx path */
 +      cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
 +      cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
 +      cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
 +      cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
 +      cmd_pdu_header.opcode = hdr->opcode;
 +      qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
 +
 +      /* Fill tx AHS and rx buffer */
        if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
 -              /* not considering  superIO or FastIO */
 -              if (cmd->io_tbl.sge_valid == 1) {
 -                      cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
 -                      cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
 -                      cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
 -                      cached_sge->sge.sge_len = bd[0].sge_len;
 -                      qedi->cached_sgls++;
 -              } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SLOW_IO, 1);
 -                      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                                ISCSI_REG1_NUM_FAST_SGES, 0);
 -                      phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
 -                      phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
 -                      phys_sgl->sgl_base.hi =
 -                                   (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
 -                      phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
 -                      qedi->slow_sgls++;
 -              } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SLOW_IO, 0);
 -                      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                                ISCSI_REG1_NUM_FAST_SGES,
 -                                min((u16)QEDI_FAST_SGE_COUNT,
 -                                    (u16)cmd->io_tbl.sge_valid));
 -                      virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
 -                      virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
 -                      virt_sgl->sgl_base.hi =
 +              tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
 +              tx_sgl_task_params.sgl_phys_addr.lo =
 +                                               (u32)(cmd->io_tbl.sge_tbl_dma);
 +              tx_sgl_task_params.sgl_phys_addr.hi =
                                      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
 -                      virt_sgl->sgl_initial_offset =
 -                               (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
 -                      qedi->fast_sgls++;
 -              }
 -              fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
 -              fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
 -      } else {
 -      /* Rx path */
 -              if (cmd->io_tbl.sge_valid == 1) {
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SLOW_IO, 0);
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SINGLE_SGE, 1);
 -                      single_sge = &mst_cxt->sgl_union.single_sge;
 -                      single_sge->sge_addr.lo = bd[0].sge_addr.lo;
 -                      single_sge->sge_addr.hi = bd[0].sge_addr.hi;
 -                      single_sge->sge_len = bd[0].sge_len;
 -                      qedi->cached_sgls++;
 -              } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
 -                      sgl_struct = &mst_cxt->sgl_union.sgl_struct;
 -                      sgl_struct->sgl_addr.lo =
 -                                              (u32)(cmd->io_tbl.sge_tbl_dma);
 -                      sgl_struct->sgl_addr.hi =
 -                                   (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SLOW_IO, 1);
 -                      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                                ISCSI_REG1_NUM_FAST_SGES, 0);
 -                      sgl_struct->updated_sge_size = 0;
 -                      sgl_struct->updated_sge_offset = 0;
 -                      qedi->slow_sgls++;
 -              } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
 -                      sgl_struct = &mst_cxt->sgl_union.sgl_struct;
 -                      sgl_struct->sgl_addr.lo =
 -                                              (u32)(cmd->io_tbl.sge_tbl_dma);
 -                      sgl_struct->sgl_addr.hi =
 -                                   (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
 -                      sgl_struct->byte_offset =
 -                              (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
 -                      SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
 -                                ISCSI_MFLAGS_SLOW_IO, 0);
 -                      SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
 -                                ISCSI_REG1_NUM_FAST_SGES, 0);
 -                      sgl_struct->updated_sge_size = 0;
 -                      sgl_struct->updated_sge_offset = 0;
 -                      qedi->fast_sgls++;
 -              }
 -              fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
 -              fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
 -      }
 -
 -      if (cmd->io_tbl.sge_valid == 1)
 -              /* Single-SGL */
 -              qedi->use_cached_sge = true;
 -      else {
 +              tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
 +              tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
                if (cmd->use_slowpath)
 -                      qedi->use_slow_sge = true;
 -              else
 -                      qedi->use_fast_sge = true;
 -      }
 +                      tx_sgl_task_params.small_mid_sge = true;
 +      } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
 +              rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
 +              rx_sgl_task_params.sgl_phys_addr.lo =
 +                                               (u32)(cmd->io_tbl.sge_tbl_dma);
 +              rx_sgl_task_params.sgl_phys_addr.hi =
 +                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
 +              rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
 +              rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
 +      }
 +
 +      /* Add conn param */
 +      conn_params.first_burst_length = conn->session->first_burst;
 +      conn_params.max_send_pdu_length = conn->max_xmit_dlength;
 +      conn_params.max_burst_length = conn->session->max_burst;
 +      if (conn->session->initial_r2t_en)
 +              conn_params.initial_r2t = true;
 +      if (conn->session->imm_data_en)
 +              conn_params.immediate_data = true;
 +
 +      /* Add cmd params */
 +      cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
 +      cmd_params.sense_data_buffer_phys_addr.hi =
 +                                      (u32)((u64)cmd->sense_buffer_dma >> 32);
 +      /* Fill fw input params */
 +      task_params.context = fw_task_ctx;
 +      task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
 +      task_params.itid = tid;
 +      task_params.cq_rss_number = cq_idx;
 +      if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
 +              task_params.tx_io_size = scsi_bufflen(sc);
 +      else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
 +              task_params.rx_io_size = scsi_bufflen(sc);
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +      task_params.sqe = &ep->sq[sq_idx];
 +
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
 -                "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
 +                "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
                  (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
                  "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
                  "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
 -                (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
 +                scsi_bufflen(sc), (u16)cmd->io_tbl.sge_valid,
 +                (u32)(cmd->io_tbl.sge_tbl_dma),
                  (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
  
 -      /*  Add command in active command list */
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +
 +      if (task_params.tx_io_size != 0)
 +              ptx_sgl = &tx_sgl_task_params;
 +      if (task_params.rx_io_size != 0)
 +              prx_sgl = &rx_sgl_task_params;
 +
 +      rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
 +                                          &cmd_params, &cmd_pdu_header,
 +                                          ptx_sgl, prx_sgl,
 +                                          NULL);
 +      if (rval)
 +              return -1;
 +
        spin_lock(&qedi_conn->list_lock);
        list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
        cmd->io_cmd_in_list = true;
        qedi_conn->active_cmd_count++;
        spin_unlock(&qedi_conn->list_lock);
  
 -      qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
        qedi_ring_doorbell(qedi_conn);
 -      if (qedi_io_tracing)
 -              qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
 -
        return 0;
  }
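Note how the read/write command path fills only the SGL block for the direction that actually moves data and leaves the other pointer NULL, so init_initiator_rw_iscsi_task() skips programming it. A reduced sketch of that selection, with hypothetical types:

/*
 * Sketch: writes get a TX SGL, reads an RX SGL; the unused side stays
 * NULL for the init helper to ignore.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_sgl { uint32_t total_buffer_size; };

static void sketch_pick_sgls(int is_write, uint32_t xfer_len,
			     struct sketch_sgl *tx, struct sketch_sgl *rx,
			     struct sketch_sgl **ptx, struct sketch_sgl **prx)
{
	*ptx = NULL;
	*prx = NULL;
	if (is_write) {
		tx->total_buffer_size = xfer_len;
		*ptx = tx;		/* write: TX side only */
	} else {
		rx->total_buffer_size = xfer_len;
		*prx = rx;		/* read: RX side only  */
	}
}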
  
  int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
  {
 +      struct iscsi_task_params task_params;
 +      struct qedi_endpoint *ep;
        struct iscsi_conn *conn = task->conn;
        struct qedi_conn *qedi_conn = conn->dd_data;
        struct qedi_cmd *cmd = task->dd_data;
 -      s16 ptu_invalidate = 0;
 +      u16 sq_idx = 0;
 +      int rval = 0;
  
        QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
                  cmd->task_id, get_itt(task->itt), task->state,
                  cmd->state, qedi_conn->iscsi_conn_id);
  
 -      qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
 -      qedi_ring_doorbell(qedi_conn);
 +      memset(&task_params, 0, sizeof(task_params));
 +      ep = qedi_conn->ep;
 +
 +      sq_idx = qedi_get_wqe_idx(qedi_conn);
 +
 +      task_params.sqe = &ep->sq[sq_idx];
 +      memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
 +      task_params.itid = cmd->task_id;
  
 +      rval = init_cleanup_task(&task_params);
 +      if (rval)
 +              return rval;
 +
 +      qedi_ring_doorbell(qedi_conn);
        return 0;
  }
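Cleanup is the degenerate case of the new flow: no PDU header at all, just the task id of the command being torn down in an otherwise zeroed WQE. A sketch with hypothetical reduced types:

/*
 * Sketch: a cleanup WQE carries only the task id; the rest of the
 * entry stays zero.  Sizes and fields below are illustrative.
 */
#include <stdint.h>
#include <string.h>

struct sketch_cleanup_wqe {
	uint16_t itid;		/* the only field cleanup fills */
	uint8_t  pad[62];	/* rest of the entry stays zero */
};

static int sketch_issue_cleanup(struct sketch_cleanup_wqe *sqe, uint16_t tid)
{
	memset(sqe, 0, sizeof(*sqe));	/* zeroed WQE                 */
	sqe->itid = tid;		/* encode just the task id    */
	return 0;			/* caller rings the doorbell  */
}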
diff --combined drivers/scsi/qedi/qedi_iscsi.c
index d5eff68507e5dc38b3c0641f8508fdaf1903b81c,4cc474364c50568806b16520ddd66239b9f3ebfd..d1de172bebac626b9b61b2da4e1588a36519bb9d
@@@ -175,7 -175,7 +175,7 @@@ static void qedi_destroy_cmd_pool(struc
                if (cmd->io_tbl.sge_tbl)
                        dma_free_coherent(&qedi->pdev->dev,
                                          QEDI_ISCSI_MAX_BDS_PER_CMD *
 -                                        sizeof(struct iscsi_sge),
 +                                        sizeof(struct scsi_sge),
                                          cmd->io_tbl.sge_tbl,
                                          cmd->io_tbl.sge_tbl_dma);
  
@@@ -191,7 -191,7 +191,7 @@@ static int qedi_alloc_sget(struct qedi_
                           struct qedi_cmd *cmd)
  {
        struct qedi_io_bdt *io = &cmd->io_tbl;
 -      struct iscsi_sge *sge;
 +      struct scsi_sge *sge;
  
        io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
                                         QEDI_ISCSI_MAX_BDS_PER_CMD *
@@@ -708,20 -708,22 +708,20 @@@ static void qedi_conn_get_stats(struct 
  
  static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
  {
 -      struct iscsi_sge *bd_tbl;
 +      struct scsi_sge *bd_tbl;
  
 -      bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 +      bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
  
        bd_tbl->sge_addr.hi =
                (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
        bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
        bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
                                qedi_conn->gen_pdu.req_buf;
 -      bd_tbl->reserved0 = 0;
 -      bd_tbl = (struct iscsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
 +      bd_tbl = (struct scsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
        bd_tbl->sge_addr.hi =
                        (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
        bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
        bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
 -      bd_tbl->reserved0 = 0;
  }
  
  static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
@@@ -831,7 -833,7 +831,7 @@@ qedi_ep_connect(struct Scsi_Host *shost
                return ERR_PTR(ret);
        }
  
-       if (do_not_recover) {
+       if (qedi_do_not_recover) {
                ret = -ENOMEM;
                return ERR_PTR(ret);
        }
@@@ -955,7 -957,7 +955,7 @@@ static int qedi_ep_poll(struct iscsi_en
        struct qedi_endpoint *qedi_ep;
        int ret = 0;
  
-       if (do_not_recover)
+       if (qedi_do_not_recover)
                return 1;
  
        qedi_ep = ep->dd_data;
@@@ -1023,7 -1025,7 +1023,7 @@@ static void qedi_ep_disconnect(struct i
                }
  
                if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
-                       if (do_not_recover) {
+                       if (qedi_do_not_recover) {
                                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                                          "Do not recover cid=0x%x\n",
                                          qedi_ep->iscsi_cid);
                }
        }
  
-       if (do_not_recover)
+       if (qedi_do_not_recover)
                goto ep_exit_recover;
  
        switch (qedi_ep->state) {
diff --combined include/net/sock.h
index 6db7693b9e61854abaa461706f2678c6d429b73f,03252d53975de7ad0da66d35802738830b0e3367..08142be8938e4a758a13887e7a4cc3a3cc2da56c
@@@ -236,6 -236,7 +236,7 @@@ struct sock_common 
    *   @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
    *   @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
    *   @sk_lock:       synchronizer
+   *   @sk_kern_sock: True if sock is using kernel lock classes
    *   @sk_rcvbuf: size of receive buffer in bytes
    *   @sk_wq: sock wait queue and async head
    *   @sk_rx_dst: receive input route used by early demux
@@@ -430,7 -431,8 +431,8 @@@ struct sock 
  #endif
  
        kmemcheck_bitfield_begin(flags);
-       unsigned int            sk_padding : 2,
+       unsigned int            sk_padding : 1,
+                               sk_kern_sock : 1,
                                sk_no_check_tx : 1,
                                sk_no_check_rx : 1,
                                sk_userlocks : 4,
@@@ -1015,7 -1017,8 +1017,8 @@@ struct proto 
                                        int addr_len);
        int                     (*disconnect)(struct sock *sk, int flags);
  
-       struct sock *           (*accept)(struct sock *sk, int flags, int *err);
+       struct sock *           (*accept)(struct sock *sk, int flags, int *err,
+                                         bool kern);
  
        int                     (*ioctl)(struct sock *sk, int cmd,
                                         unsigned long arg);
@@@ -1573,7 -1576,7 +1576,7 @@@ int sock_cmsg_send(struct sock *sk, str
  int sock_no_bind(struct socket *, struct sockaddr *, int);
  int sock_no_connect(struct socket *, struct sockaddr *, int, int);
  int sock_no_socketpair(struct socket *, struct socket *);
- int sock_no_accept(struct socket *, struct socket *, int);
+ int sock_no_accept(struct socket *, struct socket *, int, bool);
  int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
  unsigned int sock_no_poll(struct file *, struct socket *,
                          struct poll_table_struct *);
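The accept() hunks above thread a new trailing bool kern through struct proto and sock_no_accept() so protocol code can tell kernel-internal accepts from userspace ones. An illustrative model of the signature change, with stand-in types only:

/*
 * Sketch: the accept hook gains "bool kern"; implementations that do
 * not care simply ignore it.  Names below are illustrative.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_sock;

struct sketch_proto {
	/* old: struct sketch_sock *(*accept)(struct sketch_sock *, int, int *); */
	struct sketch_sock *(*accept)(struct sketch_sock *sk, int flags,
				      int *err, bool kern);
};

static struct sketch_sock *sketch_no_accept(struct sketch_sock *sk,
					    int flags, int *err, bool kern)
{
	(void)sk; (void)flags; (void)kern;
	if (err)
		*err = -1;	/* stands in for -EOPNOTSUPP */
	return NULL;
}

static const struct sketch_proto sketch_ops = {
	.accept = sketch_no_accept,
};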
@@@ -1780,8 -1783,11 +1783,8 @@@ __sk_dst_set(struct sock *sk, struct ds
  
        sk_tx_queue_clear(sk);
        sk->sk_dst_pending_confirm = 0;
 -      /*
 -       * This can be called while sk is owned by the caller only,
 -       * with no state that can be checked in a rcu_dereference_check() cond
 -       */
 -      old_dst = rcu_dereference_raw(sk->sk_dst_cache);
 +      old_dst = rcu_dereference_protected(sk->sk_dst_cache,
 +                                          lockdep_sock_is_held(sk));
        rcu_assign_pointer(sk->sk_dst_cache, dst);
        dst_release(old_dst);
  }
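The __sk_dst_set() hunk replaces rcu_dereference_raw() with rcu_dereference_protected(..., lockdep_sock_is_held(sk)), which documents which lock makes this update-side read safe and lets lockdep verify it. A userspace model of the idea, with assert() standing in for a lockdep splat and all names hypothetical:

/*
 * Sketch: update-side reads of an RCU-protected pointer take an
 * explicit "lock held" condition and check it, much as lockdep would.
 */
#include <assert.h>
#include <stdbool.h>

struct sketch_sk {
	void *dst_cache;     /* RCU-protected in the real code        */
	bool  owned;         /* stands in for lockdep_sock_is_held()  */
};

static void *sketch_deref_protected(struct sketch_sk *sk, bool cond)
{
	assert(cond);        /* lockdep would splat here instead */
	return sk->dst_cache;
}

static void sketch_dst_set(struct sketch_sk *sk, void *new_dst)
{
	void *old = sketch_deref_protected(sk, sk->owned);

	sk->dst_cache = new_dst;   /* rcu_assign_pointer() in the kernel */
	(void)old;                 /* real code releases the old dst     */
}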
diff --combined net/core/sock.c
index 768aedf238f5b4dd4ca395e1320e9ca491233add,a96d5f7a5734a52dfd6a2df8490c7bd7f5f6599a..a83731c367619d55c8aa883c5583830c18533676
@@@ -197,127 -197,63 +197,117 @@@ EXPORT_SYMBOL(sk_net_capable)
  
  /*
   * Each address family might have different locking rules, so we have
-  * one slock key per address family:
+  * one slock key per address family and separate keys for internal and
+  * userspace sockets.
   */
  static struct lock_class_key af_family_keys[AF_MAX];
+ static struct lock_class_key af_family_kern_keys[AF_MAX];
  static struct lock_class_key af_family_slock_keys[AF_MAX];
+ static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
  
  /*
   * Make lock validator output more readable. (we pre-construct these
   * strings build-time, so that runtime initialization of socket
   * locks is fast):
   */
+ #define _sock_locks(x)                                                  \
+   x "AF_UNSPEC",      x "AF_UNIX"     ,       x "AF_INET"     , \
+   x "AF_AX25"  ,      x "AF_IPX"      ,       x "AF_APPLETALK", \
+   x "AF_NETROM",      x "AF_BRIDGE"   ,       x "AF_ATMPVC"   , \
+   x "AF_X25"   ,      x "AF_INET6"    ,       x "AF_ROSE"     , \
+   x "AF_DECnet",      x "AF_NETBEUI"  ,       x "AF_SECURITY" , \
+   x "AF_KEY"   ,      x "AF_NETLINK"  ,       x "AF_PACKET"   , \
+   x "AF_ASH"   ,      x "AF_ECONET"   ,       x "AF_ATMSVC"   , \
+   x "AF_RDS"   ,      x "AF_SNA"      ,       x "AF_IRDA"     , \
+   x "AF_PPPOX" ,      x "AF_WANPIPE"  ,       x "AF_LLC"      , \
+   x "27"       ,      x "28"          ,       x "AF_CAN"      , \
+   x "AF_TIPC"  ,      x "AF_BLUETOOTH",       x "IUCV"        , \
+   x "AF_RXRPC" ,      x "AF_ISDN"     ,       x "AF_PHONET"   , \
+   x "AF_IEEE802154",  x "AF_CAIF"     ,       x "AF_ALG"      , \
+   x "AF_NFC"   ,      x "AF_VSOCK"    ,       x "AF_KCM"      , \
+   x "AF_QIPCRTR",     x "AF_SMC"      ,       x "AF_MAX"
  static const char *const af_family_key_strings[AF_MAX+1] = {
-   "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
-   "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
-   "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
-   "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
-   "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
-   "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
-   "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
-   "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
-   "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
-   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
-   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
-   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-   "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC"     , "sk_lock-AF_MAX"
+       _sock_locks("sk_lock-")
  };
  static const char *const af_family_slock_key_strings[AF_MAX+1] = {
-   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
-   "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
-   "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
-   "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
-   "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
-   "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
-   "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
-   "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
-   "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
-   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
-   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
-   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-   "slock-AF_QIPCRTR", "slock-AF_SMC"     , "slock-AF_MAX"
+       _sock_locks("slock-")
  };
  static const char *const af_family_clock_key_strings[AF_MAX+1] = {
-   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
-   "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
-   "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
-   "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
-   "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
-   "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
-   "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
-   "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
-   "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
-   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
-   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
-   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-   "clock-AF_QIPCRTR", "clock-AF_SMC"     , "clock-AF_MAX"
+       _sock_locks("clock-")
+ };
+ static const char *const af_family_kern_key_strings[AF_MAX+1] = {
+       _sock_locks("k-sk_lock-")
+ };
+ static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
+       _sock_locks("k-slock-")
+ };
+ static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
+       _sock_locks("k-clock-")
  };
 +static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
 +  "rlock-AF_UNSPEC", "rlock-AF_UNIX"     , "rlock-AF_INET"     ,
 +  "rlock-AF_AX25"  , "rlock-AF_IPX"      , "rlock-AF_APPLETALK",
 +  "rlock-AF_NETROM", "rlock-AF_BRIDGE"   , "rlock-AF_ATMPVC"   ,
 +  "rlock-AF_X25"   , "rlock-AF_INET6"    , "rlock-AF_ROSE"     ,
 +  "rlock-AF_DECnet", "rlock-AF_NETBEUI"  , "rlock-AF_SECURITY" ,
 +  "rlock-AF_KEY"   , "rlock-AF_NETLINK"  , "rlock-AF_PACKET"   ,
 +  "rlock-AF_ASH"   , "rlock-AF_ECONET"   , "rlock-AF_ATMSVC"   ,
 +  "rlock-AF_RDS"   , "rlock-AF_SNA"      , "rlock-AF_IRDA"     ,
 +  "rlock-AF_PPPOX" , "rlock-AF_WANPIPE"  , "rlock-AF_LLC"      ,
 +  "rlock-27"       , "rlock-28"          , "rlock-AF_CAN"      ,
 +  "rlock-AF_TIPC"  , "rlock-AF_BLUETOOTH", "rlock-AF_IUCV"     ,
 +  "rlock-AF_RXRPC" , "rlock-AF_ISDN"     , "rlock-AF_PHONET"   ,
 +  "rlock-AF_IEEE802154", "rlock-AF_CAIF" , "rlock-AF_ALG"      ,
 +  "rlock-AF_NFC"   , "rlock-AF_VSOCK"    , "rlock-AF_KCM"      ,
 +  "rlock-AF_QIPCRTR", "rlock-AF_SMC"     , "rlock-AF_MAX"
 +};
 +static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
 +  "wlock-AF_UNSPEC", "wlock-AF_UNIX"     , "wlock-AF_INET"     ,
 +  "wlock-AF_AX25"  , "wlock-AF_IPX"      , "wlock-AF_APPLETALK",
 +  "wlock-AF_NETROM", "wlock-AF_BRIDGE"   , "wlock-AF_ATMPVC"   ,
 +  "wlock-AF_X25"   , "wlock-AF_INET6"    , "wlock-AF_ROSE"     ,
 +  "wlock-AF_DECnet", "wlock-AF_NETBEUI"  , "wlock-AF_SECURITY" ,
 +  "wlock-AF_KEY"   , "wlock-AF_NETLINK"  , "wlock-AF_PACKET"   ,
 +  "wlock-AF_ASH"   , "wlock-AF_ECONET"   , "wlock-AF_ATMSVC"   ,
 +  "wlock-AF_RDS"   , "wlock-AF_SNA"      , "wlock-AF_IRDA"     ,
 +  "wlock-AF_PPPOX" , "wlock-AF_WANPIPE"  , "wlock-AF_LLC"      ,
 +  "wlock-27"       , "wlock-28"          , "wlock-AF_CAN"      ,
 +  "wlock-AF_TIPC"  , "wlock-AF_BLUETOOTH", "wlock-AF_IUCV"     ,
 +  "wlock-AF_RXRPC" , "wlock-AF_ISDN"     , "wlock-AF_PHONET"   ,
 +  "wlock-AF_IEEE802154", "wlock-AF_CAIF" , "wlock-AF_ALG"      ,
 +  "wlock-AF_NFC"   , "wlock-AF_VSOCK"    , "wlock-AF_KCM"      ,
 +  "wlock-AF_QIPCRTR", "wlock-AF_SMC"     , "wlock-AF_MAX"
 +};
 +static const char *const af_family_elock_key_strings[AF_MAX+1] = {
 +  "elock-AF_UNSPEC", "elock-AF_UNIX"     , "elock-AF_INET"     ,
 +  "elock-AF_AX25"  , "elock-AF_IPX"      , "elock-AF_APPLETALK",
 +  "elock-AF_NETROM", "elock-AF_BRIDGE"   , "elock-AF_ATMPVC"   ,
 +  "elock-AF_X25"   , "elock-AF_INET6"    , "elock-AF_ROSE"     ,
 +  "elock-AF_DECnet", "elock-AF_NETBEUI"  , "elock-AF_SECURITY" ,
 +  "elock-AF_KEY"   , "elock-AF_NETLINK"  , "elock-AF_PACKET"   ,
 +  "elock-AF_ASH"   , "elock-AF_ECONET"   , "elock-AF_ATMSVC"   ,
 +  "elock-AF_RDS"   , "elock-AF_SNA"      , "elock-AF_IRDA"     ,
 +  "elock-AF_PPPOX" , "elock-AF_WANPIPE"  , "elock-AF_LLC"      ,
 +  "elock-27"       , "elock-28"          , "elock-AF_CAN"      ,
 +  "elock-AF_TIPC"  , "elock-AF_BLUETOOTH", "elock-AF_IUCV"     ,
 +  "elock-AF_RXRPC" , "elock-AF_ISDN"     , "elock-AF_PHONET"   ,
 +  "elock-AF_IEEE802154", "elock-AF_CAIF" , "elock-AF_ALG"      ,
 +  "elock-AF_NFC"   , "elock-AF_VSOCK"    , "elock-AF_KCM"      ,
 +  "elock-AF_QIPCRTR", "elock-AF_SMC"     , "elock-AF_MAX"
 +};
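
The four k-* tables above come from the net-next side of this merge and are generated with the _sock_locks() helper rather than spelled out by hand: it pastes a prefix onto one canonical list of address-family names, so every derived table stays in sync with the AF_* numbering by construction. A sketch of its shape, assuming plain string-literal pasting (see net/core/sock.c for the verbatim definition):

	#define _sock_locks(x)						\
		x "AF_UNSPEC",	x "AF_UNIX",	x "AF_INET",		\
		x "AF_AX25",	x "AF_IPX",	x "AF_APPLETALK",	\
		/* ... one entry per family, in numeric order ... */	\
		x "AF_QIPCRTR",	x "AF_SMC",	x "AF_MAX"

The rlock/wlock/elock tables keep the long-hand form because they were added on the other side of the merge.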
  
  /*
 - * sk_callback_lock locking rules are per-address-family,
 + * sk_callback_lock and the sk queues' locking rules are per-address-family,
   * so split the lock classes by using a per-AF key:
   */
  static struct lock_class_key af_callback_keys[AF_MAX];
 +static struct lock_class_key af_rlock_keys[AF_MAX];
 +static struct lock_class_key af_wlock_keys[AF_MAX];
 +static struct lock_class_key af_elock_keys[AF_MAX];
+ static struct lock_class_key af_kern_callback_keys[AF_MAX];
  
  /* Take into consideration the size of the struct sk_buff overhead in the
   * determination of these values, since that is non-constant across
@@@ -1347,7 -1283,16 +1337,16 @@@ lenout
   */
  static inline void sock_lock_init(struct sock *sk)
  {
-       sock_lock_init_class_and_name(sk,
+       if (sk->sk_kern_sock)
+               sock_lock_init_class_and_name(
+                       sk,
+                       af_family_kern_slock_key_strings[sk->sk_family],
+                       af_family_kern_slock_keys + sk->sk_family,
+                       af_family_kern_key_strings[sk->sk_family],
+                       af_family_kern_keys + sk->sk_family);
+       else
+               sock_lock_init_class_and_name(
+                       sk,
                        af_family_slock_key_strings[sk->sk_family],
                        af_family_slock_keys + sk->sk_family,
                        af_family_key_strings[sk->sk_family],
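
Both branches hand sock_lock_init_class_and_name() two string/key pairs because a socket lock carries two lockdep identities: one for the raw spinlock (sk_lock.slock) and one for the sleeping "owned" half tracked through sk_lock.dep_map. Roughly, per include/net/sock.h (a condensed sketch, not the verbatim macro):

	#define sock_lock_init_class_and_name(sk, sname, skey, name, key)  \
	do {								    \
		sk->sk_lock.owned = 0;					    \
		init_waitqueue_head(&sk->sk_lock.wq);			    \
		spin_lock_init(&(sk)->sk_lock.slock);			    \
		/* spinlock half */					    \
		lockdep_set_class_and_name(&(sk)->sk_lock.slock,	    \
					   (skey), (sname));		    \
		/* sleeping "owned" half */				    \
		lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
	} while (0)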
@@@ -1453,6 -1398,7 +1452,7 @@@ struct sock *sk_alloc(struct net *net, 
                 * why we need sk_prot_creator -acme
                 */
                sk->sk_prot = sk->sk_prot_creator = prot;
+               sk->sk_kern_sock = kern;
                sock_lock_init(sk);
                sk->sk_net_refcnt = kern ? 0 : 1;
                if (likely(sk->sk_net_refcnt))
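
Recording kern in sk->sk_kern_sock is what lets sock_lock_init() above pick the k-* classes: the flag originates at the socket-creation entry point. A minimal sketch of the kernel-side path (sock_create_kern() passes kern = 1 down to sk_alloc()):

	struct socket *sock;
	int err;

	/* kernel-internal socket: sk_kern_sock = 1, so the lockdep class
	 * becomes "k-sk_lock-AF_INET" instead of "sk_lock-AF_INET"
	 */
	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (!err)
		sock_release(sock);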
@@@ -1532,27 -1478,6 +1532,27 @@@ void sk_free(struct sock *sk
  }
  EXPORT_SYMBOL(sk_free);
  
 +static void sk_init_common(struct sock *sk)
 +{
 +      skb_queue_head_init(&sk->sk_receive_queue);
 +      skb_queue_head_init(&sk->sk_write_queue);
 +      skb_queue_head_init(&sk->sk_error_queue);
 +
 +      rwlock_init(&sk->sk_callback_lock);
 +      lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
 +                      af_rlock_keys + sk->sk_family,
 +                      af_family_rlock_key_strings[sk->sk_family]);
 +      lockdep_set_class_and_name(&sk->sk_write_queue.lock,
 +                      af_wlock_keys + sk->sk_family,
 +                      af_family_wlock_key_strings[sk->sk_family]);
 +      lockdep_set_class_and_name(&sk->sk_error_queue.lock,
 +                      af_elock_keys + sk->sk_family,
 +                      af_family_elock_key_strings[sk->sk_family]);
 +      lockdep_set_class_and_name(&sk->sk_callback_lock,
 +                      af_callback_keys + sk->sk_family,
 +                      af_family_clock_key_strings[sk->sk_family]);
 +}
 +
  /**
   *    sk_clone_lock - clone a socket, and lock its clone
   *    @sk: the socket to clone
@@@ -1586,7 -1511,13 +1586,7 @@@ struct sock *sk_clone_lock(const struc
                 */
                atomic_set(&newsk->sk_wmem_alloc, 1);
                atomic_set(&newsk->sk_omem_alloc, 0);
 -              skb_queue_head_init(&newsk->sk_receive_queue);
 -              skb_queue_head_init(&newsk->sk_write_queue);
 -
 -              rwlock_init(&newsk->sk_callback_lock);
 -              lockdep_set_class_and_name(&newsk->sk_callback_lock,
 -                              af_callback_keys + newsk->sk_family,
 -                              af_family_clock_key_strings[newsk->sk_family]);
 +              sk_init_common(newsk);
  
                newsk->sk_dst_cache     = NULL;
                newsk->sk_dst_pending_confirm = 0;
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
  
                sock_reset_flag(newsk, SOCK_DONE);
 -              skb_queue_head_init(&newsk->sk_error_queue);
  
                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
@@@ -2345,7 -2277,8 +2345,8 @@@ int sock_no_socketpair(struct socket *s
  }
  EXPORT_SYMBOL(sock_no_socketpair);
  
- int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+ int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
+                  bool kern)
  {
        return -EOPNOTSUPP;
  }
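
The new bool exists so the accept() path can classify the child socket the same way sk_alloc() classifies a fresh one: only the caller knows whether the accept is on behalf of the kernel (kernel_accept()) or a syscall. A hypothetical wrapper, for illustration only, showing how the flag rides along the ops->accept() call:

	static int accept_tagged(struct socket *sock, struct socket *newsock,
				 int flags, bool from_kernel)
	{
		/* from_kernel = true on the kernel_accept() path, false from
		 * the accept4() syscall; protocols forward it into their
		 * child-socket allocators, as dn_accept() does below.
		 */
		return sock->ops->accept(sock, newsock, flags, from_kernel);
	}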
@@@ -2522,7 -2455,10 +2523,7 @@@ EXPORT_SYMBOL(sk_stop_timer)
  
  void sock_init_data(struct socket *sock, struct sock *sk)
  {
 -      skb_queue_head_init(&sk->sk_receive_queue);
 -      skb_queue_head_init(&sk->sk_write_queue);
 -      skb_queue_head_init(&sk->sk_error_queue);
 -
 +      sk_init_common(sk);
        sk->sk_send_head        =       NULL;
  
        init_timer(&sk->sk_timer);
                sk->sk_uid      =       make_kuid(sock_net(sk)->user_ns, 0);
        }
  
+       rwlock_init(&sk->sk_callback_lock);
+       if (sk->sk_kern_sock)
+               lockdep_set_class_and_name(
+                       &sk->sk_callback_lock,
+                       af_kern_callback_keys + sk->sk_family,
+                       af_family_kern_clock_key_strings[sk->sk_family]);
+       else
+               lockdep_set_class_and_name(
+                       &sk->sk_callback_lock,
+                       af_callback_keys + sk->sk_family,
+                       af_family_clock_key_strings[sk->sk_family]);
        sk->sk_state_change     =       sock_def_wakeup;
        sk->sk_data_ready       =       sock_def_readable;
        sk->sk_write_space      =       sock_def_write_space;
diff --combined net/decnet/af_decnet.c
index 0ec8cb4363e97514b29657f0dbce43a82b19026e,7de5b40a5d0d1245ad995877f779e0d87d1cf398..9afa2a5030b2570c89de8decc3b20aad3a224e5c
@@@ -132,7 -132,6 +132,7 @@@ Version 0.0.6    2.1.110   07-aug-98   
  #include <net/neighbour.h>
  #include <net/dst.h>
  #include <net/fib_rules.h>
 +#include <net/tcp.h>
  #include <net/dn.h>
  #include <net/dn_nsp.h>
  #include <net/dn_dev.h>
@@@ -1071,7 -1070,8 +1071,8 @@@ static struct sk_buff *dn_wait_for_conn
        return skb == NULL ? ERR_PTR(err) : skb;
  }
  
- static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
+ static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
+                    bool kern)
  {
        struct sock *sk = sock->sk, *newsk;
        struct sk_buff *skb = NULL;
  
        cb = DN_SKB_CB(skb);
        sk->sk_ack_backlog--;
-       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
+       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
        if (newsk == NULL) {
                release_sock(sk);
                kfree_skb(skb);
@@@ -1469,18 -1469,18 +1470,18 @@@ static int __dn_setsockopt(struct socke
        case DSO_NODELAY:
                if (optlen != sizeof(int))
                        return -EINVAL;
 -              if (scp->nonagle == 2)
 +              if (scp->nonagle == TCP_NAGLE_CORK)
                        return -EINVAL;
 -              scp->nonagle = (u.val == 0) ? 0 : 1;
 +              scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_OFF;
                /* if (scp->nonagle == TCP_NAGLE_OFF) { Push pending frames } */
                break;
  
        case DSO_CORK:
                if (optlen != sizeof(int))
                        return -EINVAL;
 -              if (scp->nonagle == 1)
 +              if (scp->nonagle == TCP_NAGLE_OFF)
                        return -EINVAL;
 -              scp->nonagle = (u.val == 0) ? 0 : 2;
 +              scp->nonagle = (u.val == 0) ? 0 : TCP_NAGLE_CORK;
                /* if (scp->nonagle == 0) { Push pending frames } */
                break;
  
@@@ -1608,14 -1608,14 +1609,14 @@@ static int __dn_getsockopt(struct socke
        case DSO_NODELAY:
                if (r_len > sizeof(int))
                        r_len = sizeof(int);
 -              val = (scp->nonagle == 1);
 +              val = (scp->nonagle == TCP_NAGLE_OFF);
                r_data = &val;
                break;
  
        case DSO_CORK:
                if (r_len > sizeof(int))
                        r_len = sizeof(int);
 -              val = (scp->nonagle == 2);
 +              val = (scp->nonagle == TCP_NAGLE_CORK);
                r_data = &val;
                break;
  
diff --combined net/ipv4/tcp_ipv4.c
index 7b332ed6648809c6f546eb788f84c420b0c06834,575e19dcc01763ef3fa938dea3ea51995b573163..08d870e45658f51e9c9c8f0047290c05e61c1bc4
@@@ -94,12 -94,12 +94,12 @@@ static int tcp_v4_md5_hash_hdr(char *md
  struct inet_hashinfo tcp_hashinfo;
  EXPORT_SYMBOL(tcp_hashinfo);
  
 -static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
 +static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
  {
 -      return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 -                                        ip_hdr(skb)->saddr,
 -                                        tcp_hdr(skb)->dest,
 -                                        tcp_hdr(skb)->source, tsoff);
 +      return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
 +                                      ip_hdr(skb)->saddr,
 +                                      tcp_hdr(skb)->dest,
 +                                      tcp_hdr(skb)->source, tsoff);
  }
  
  int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@@ -236,11 -236,11 +236,11 @@@ int tcp_v4_connect(struct sock *sk, str
        rt = NULL;
  
        if (likely(!tp->repair)) {
 -              seq = secure_tcp_sequence_number(inet->inet_saddr,
 -                                               inet->inet_daddr,
 -                                               inet->inet_sport,
 -                                               usin->sin_port,
 -                                               &tp->tsoffset);
 +              seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
 +                                             inet->inet_daddr,
 +                                             inet->inet_sport,
 +                                             usin->sin_port,
 +                                             &tp->tsoffset);
                if (!tp->write_seq)
                        tp->write_seq = seq;
        }
@@@ -279,10 -279,13 +279,13 @@@ EXPORT_SYMBOL(tcp_v4_connect)
   */
  void tcp_v4_mtu_reduced(struct sock *sk)
  {
-       struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
-       u32 mtu = tcp_sk(sk)->mtu_info;
+       struct dst_entry *dst;
+       u32 mtu;
  
+       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+               return;
+       mtu = tcp_sk(sk)->mtu_info;
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
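
The early return relies on the TCPF_* masks being defined as (1 << TCP_*), so testing sk_state against a set of states is a single AND rather than a chain of comparisons. The same test as a hypothetical helper (illustration, not an existing kernel function):

	static inline bool sk_state_in(const struct sock *sk, int state_set)
	{
		return (1 << sk->sk_state) & state_set;
	}

	/* usage: */
	if (sk_state_in(sk, TCPF_LISTEN | TCPF_CLOSE))
		return;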
@@@ -428,7 -431,8 +431,8 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
        switch (type) {
        case ICMP_REDIRECT:
-               do_redirect(icmp_skb, sk);
+               if (!sock_owned_by_user(sk))
+                       do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
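
The guard above reflects a general rule: this handler runs in softirq context holding only bh_lock_sock(), so it must not rewrite routing state while a process context owns the socket. The shape of the rule in isolation (a sketch; tcp_v4_err() takes bh_lock_sock() earlier in the function):

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		do_redirect(icmp_skb, sk);	/* safe: we own sk */
	bh_unlock_sock(sk);

The identical guard is applied to the IPv6 NDISC_REDIRECT path further down.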
@@@ -1249,7 -1253,7 +1253,7 @@@ static const struct tcp_request_sock_op
        .cookie_init_seq =      cookie_v4_init_sequence,
  #endif
        .route_req      =       tcp_v4_route_req,
 -      .init_seq       =       tcp_v4_init_sequence,
 +      .init_seq_tsoff =       tcp_v4_init_seq_and_tsoff,
        .send_synack    =       tcp_v4_send_synack,
  };
  
diff --combined net/ipv6/tcp_ipv6.c
index 56f742fff9672325a69dac2845e7dc5441c3636c,49fa2e8c3fa9212eef1198a1077a6726f0f1b6fc..c73a431fd06f90814d30e03d9c1307c4313dbd42
@@@ -101,12 -101,12 +101,12 @@@ static void inet6_sk_rx_dst_set(struct 
        }
  }
  
 -static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
 +static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
  {
 -      return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 -                                          ipv6_hdr(skb)->saddr.s6_addr32,
 -                                          tcp_hdr(skb)->dest,
 -                                          tcp_hdr(skb)->source, tsoff);
 +      return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
 +                                        ipv6_hdr(skb)->saddr.s6_addr32,
 +                                        tcp_hdr(skb)->dest,
 +                                        tcp_hdr(skb)->source, tsoff);
  }
  
  static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk_set_txhash(sk);
  
        if (likely(!tp->repair)) {
 -              seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
 -                                                 sk->sk_v6_daddr.s6_addr32,
 -                                                 inet->inet_sport,
 -                                                 inet->inet_dport,
 -                                                 &tp->tsoffset);
 +              seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
 +                                               sk->sk_v6_daddr.s6_addr32,
 +                                               inet->inet_sport,
 +                                               inet->inet_dport,
 +                                               &tp->tsoffset);
                if (!tp->write_seq)
                        tp->write_seq = seq;
        }
@@@ -391,10 -391,12 +391,12 @@@ static void tcp_v6_err(struct sk_buff *
        np = inet6_sk(sk);
  
        if (type == NDISC_REDIRECT) {
-               struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+               if (!sock_owned_by_user(sk)) {
+                       struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
  
-               if (dst)
-                       dst->ops->redirect(dst, sk, skb);
+                       if (dst)
+                               dst->ops->redirect(dst, sk, skb);
+               }
                goto out;
        }
  
@@@ -755,7 -757,7 +757,7 @@@ static const struct tcp_request_sock_op
        .cookie_init_seq =      cookie_v6_init_sequence,
  #endif
        .route_req      =       tcp_v6_route_req,
 -      .init_seq       =       tcp_v6_init_sequence,
 +      .init_seq_tsoff =       tcp_v6_init_seq_and_tsoff,
        .send_synack    =       tcp_v6_send_synack,
  };
  
diff --combined net/mpls/af_mpls.c
index 0c5d111abe363bd5997eb590b3525d7bad04c43e,33211f9a265608c378848c97b4be36a1cec9736d..f7a08e5f97635b5c421dd4cb4174eb7215e4e50c
@@@ -32,9 -32,7 +32,9 @@@
  #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
  
  static int zero = 0;
 +static int one = 1;
  static int label_limit = (1 << 20) - 1;
 +static int ttl_max = 255;
  
  static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
                       struct nlmsghdr *nlh, struct net *net, u32 portid,
@@@ -222,8 -220,8 +222,8 @@@ out
        return &rt->rt_nh[nh_index];
  }
  
 -static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
 -                      struct mpls_entry_decoded dec)
 +static bool mpls_egress(struct net *net, struct mpls_route *rt,
 +                      struct sk_buff *skb, struct mpls_entry_decoded dec)
  {
        enum mpls_payload_type payload_type;
        bool success = false;
        switch (payload_type) {
        case MPT_IPV4: {
                struct iphdr *hdr4 = ip_hdr(skb);
 +              u8 new_ttl;
                skb->protocol = htons(ETH_P_IP);
 +
 +              /* If propagating TTL, take the decremented TTL from
 +               * the incoming MPLS header, otherwise decrement the
 +               * TTL, but only if not 0 to avoid underflow.
 +               */
 +              if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
 +                  (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
 +                   net->mpls.ip_ttl_propagate))
 +                      new_ttl = dec.ttl;
 +              else
 +                      new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
 +
                csum_replace2(&hdr4->check,
                              htons(hdr4->ttl << 8),
 -                            htons(dec.ttl << 8));
 -              hdr4->ttl = dec.ttl;
 +                            htons(new_ttl << 8));
 +              hdr4->ttl = new_ttl;
                success = true;
                break;
        }
        case MPT_IPV6: {
                struct ipv6hdr *hdr6 = ipv6_hdr(skb);
                skb->protocol = htons(ETH_P_IPV6);
 -              hdr6->hop_limit = dec.ttl;
 +
 +              /* If propagating TTL, take the decremented TTL from
 +               * the incoming MPLS header, otherwise decrement the
 +               * hop limit, but only if not 0 to avoid underflow.
 +               */
 +              if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
 +                  (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
 +                   net->mpls.ip_ttl_propagate))
 +                      hdr6->hop_limit = dec.ttl;
 +              else if (hdr6->hop_limit)
 +                      hdr6->hop_limit = hdr6->hop_limit - 1;
                success = true;
                break;
        }
        case MPT_UNSPEC:
 +              /* Should have decided which protocol it is by now */
                break;
        }
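
The two branches above encode a three-way policy: a per-route ENABLED or DISABLED setting wins outright, and DEFAULT defers to the new net.mpls.ip_ttl_propagate sysctl. Condensed into a hypothetical predicate (illustration only):

	static bool mpls_should_propagate_ttl(const struct net *net,
					      const struct mpls_route *rt)
	{
		return rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
		       (rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
			net->mpls.ip_ttl_propagate);
	}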
  
@@@ -387,7 -361,7 +387,7 @@@ static int mpls_forward(struct sk_buff 
  
        if (unlikely(!new_header_size && dec.bos)) {
                /* Penultimate hop popping */
 -              if (!mpls_egress(rt, skb, dec))
 +              if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
                        goto err;
        } else {
                bool bos;
@@@ -438,7 -412,6 +438,7 @@@ static struct packet_type mpls_packet_t
  static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
        [RTA_DST]               = { .type = NLA_U32 },
        [RTA_OIF]               = { .type = NLA_U32 },
 +      [RTA_TTL_PROPAGATE]     = { .type = NLA_U8 },
  };
  
  struct mpls_route_config {
        u8                      rc_via_alen;
        u8                      rc_via[MAX_VIA_ALEN];
        u32                     rc_label;
 +      u8                      rc_ttl_propagate;
        u8                      rc_output_labels;
        u32                     rc_output_label[MAX_NEW_LABELS];
        u32                     rc_nlflags;
@@@ -884,7 -856,6 +884,7 @@@ static int mpls_route_add(struct mpls_r
  
        rt->rt_protocol = cfg->rc_protocol;
        rt->rt_payload_type = cfg->rc_payload_type;
 +      rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
  
        if (cfg->rc_mp)
                err = mpls_nh_build_multi(cfg, rt);
@@@ -1317,7 -1288,8 +1317,8 @@@ static void mpls_ifdown(struct net_devi
                                /* fall through */
                        case NETDEV_CHANGE:
                                nh->nh_flags |= RTNH_F_LINKDOWN;
-                               ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+                               if (event != NETDEV_UNREGISTER)
+                                       ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
@@@ -1605,7 -1577,6 +1606,7 @@@ static int rtm_to_route_config(struct s
        cfg->rc_label           = LABEL_NOT_SPECIFIED;
        cfg->rc_protocol        = rtm->rtm_protocol;
        cfg->rc_via_table       = MPLS_NEIGH_TABLE_UNSPEC;
 +      cfg->rc_ttl_propagate   = MPLS_TTL_PROP_DEFAULT;
        cfg->rc_nlflags         = nlh->nlmsg_flags;
        cfg->rc_nlinfo.portid   = NETLINK_CB(skb).portid;
        cfg->rc_nlinfo.nlh      = nlh;
                        cfg->rc_mp_len = nla_len(nla);
                        break;
                }
 +              case RTA_TTL_PROPAGATE:
 +              {
 +                      u8 ttl_propagate = nla_get_u8(nla);
 +
 +                      if (ttl_propagate > 1)
 +                              goto errout;
 +                      cfg->rc_ttl_propagate = ttl_propagate ?
 +                              MPLS_TTL_PROP_ENABLED :
 +                              MPLS_TTL_PROP_DISABLED;
 +                      break;
 +              }
                default:
                        /* Unsupported attribute */
                        goto errout;
@@@ -1723,15 -1683,6 +1724,15 @@@ static int mpls_dump_route(struct sk_bu
  
        if (nla_put_labels(skb, RTA_DST, 1, &label))
                goto nla_put_failure;
 +
 +      if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
 +              bool ttl_propagate =
 +                      rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
 +
 +              if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
 +                             ttl_propagate))
 +                      goto nla_put_failure;
 +      }
        if (rt->rt_nhn == 1) {
                const struct mpls_nh *nh = rt->rt_nh;
  
@@@ -1842,8 -1793,7 +1843,8 @@@ static inline size_t lfib_nlmsg_size(st
  {
        size_t payload =
                NLMSG_ALIGN(sizeof(struct rtmsg))
 -              + nla_total_size(4);                    /* RTA_DST */
 +              + nla_total_size(4)                     /* RTA_DST */
 +              + nla_total_size(1);                    /* RTA_TTL_PROPAGATE */
  
        if (rt->rt_nhn == 1) {
                struct mpls_nh *nh = rt->rt_nh;
@@@ -1927,7 -1877,6 +1928,7 @@@ static int resize_platform_label_table(
                RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
 +              rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
                rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
                rt0->rt_nh->nh_via_alen = lo->addr_len;
                memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
                RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
 +              rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
                rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
                rt2->rt_nh->nh_via_alen = lo->addr_len;
                memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
@@@ -2023,9 -1971,6 +2024,9 @@@ static int mpls_platform_labels(struct 
        return ret;
  }
  
 +#define MPLS_NS_SYSCTL_OFFSET(field)          \
 +      (&((struct net *)0)->field)
 +
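The macro stores a member offset in the table's .data field by taking the member's address off a NULL struct net pointer; mpls_net_init() below then rebases each offset onto the per-namespace struct net. A self-contained userspace demonstration of the same trick, with stand-in types (the kernel's are struct net and struct netns_mpls):

	#include <stddef.h>
	#include <stdint.h>

	struct mpls_ns { int ip_ttl_propagate; int default_ttl; };
	struct net_stub { struct mpls_ns mpls; };

	/* a member "address" computed from a NULL base is just its offset */
	#define NS_OFFSET(field) ((void *)&(((struct net_stub *)0)->field))

	int main(void)
	{
		struct net_stub ns = { { 1, 255 } };
		void *off = NS_OFFSET(mpls.default_ttl); /* offset in disguise */
		int *p = (int *)((char *)&ns + (uintptr_t)off); /* rebased */

		return (*p == 255) ? 0 : 1; /* p aliases ns.mpls.default_ttl */
	}
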
  static const struct ctl_table mpls_table[] = {
        {
                .procname       = "platform_labels",
                .mode           = 0644,
                .proc_handler   = mpls_platform_labels,
        },
 +      {
 +              .procname       = "ip_ttl_propagate",
 +              .data           = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              .proc_handler   = proc_dointvec_minmax,
 +              .extra1         = &zero,
 +              .extra2         = &one,
 +      },
 +      {
 +              .procname       = "default_ttl",
 +              .data           = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
 +              .maxlen         = sizeof(int),
 +              .mode           = 0644,
 +              .proc_handler   = proc_dointvec_minmax,
 +              .extra1         = &one,
 +              .extra2         = &ttl_max,
 +      },
        { }
  };
  
  static int mpls_net_init(struct net *net)
  {
        struct ctl_table *table;
 +      int i;
  
        net->mpls.platform_labels = 0;
        net->mpls.platform_label = NULL;
 +      net->mpls.ip_ttl_propagate = 1;
 +      net->mpls.default_ttl = 255;
  
        table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
        if (table == NULL)
                return -ENOMEM;
  
 -      table[0].data = net;
 +      /* Table data contains only offsets relative to the base of
 +       * each struct net at this point, so make them absolute.
 +       */
 +      for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
 +              table[i].data = (char *)net + (uintptr_t)table[i].data;
 +
        net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
        if (net->mpls.ctl == NULL) {
                kfree(table);
@@@ -2110,6 -2029,7 +2111,7 @@@ static void mpls_net_exit(struct net *n
        for (index = 0; index < platform_labels; index++) {
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
                RCU_INIT_POINTER(platform_label[index], NULL);
+               mpls_notify_route(net, index, rt, NULL, NULL);
                mpls_rt_free(rt);
        }
        rtnl_unlock();
diff --combined net/rds/ib_cm.c
index 11d535b161255124510cd4ab2a8f7dcc62c2eaae,1c38d2c7caa8e955585b45f0c9218a0775013b4d..80fb6f63e768d3461c47533615c875526bb8bab9
@@@ -442,7 -442,7 +442,7 @@@ static int rds_ib_setup_qp(struct rds_c
                ic->i_send_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
                rdsdebug("ib_create_cq send failed: %d\n", ret);
-               goto out;
+               goto rds_ibdev_out;
        }
  
        ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
                ic->i_recv_cq = NULL;
                ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
-               goto out;
+               goto send_cq_out;
        }
  
        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
  
        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
  
        /* XXX negotiate max send/recv with remote? */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
-               goto out;
+               goto recv_cq_out;
        }
  
        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
-               goto out;
+               goto qp_out;
        }
  
        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
-               goto out;
+               goto send_hdrs_dma_out;
        }
  
        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
-               goto out;
+               goto recv_hdrs_dma_out;
        }
  
        ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
-               goto out;
+               goto ack_dma_out;
        }
  
        ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
-               goto out;
+               goto sends_out;
        }
  
        rds_ib_recv_init_ack(ic);
        rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
                 ic->i_send_cq, ic->i_recv_cq);
  
- out:
+       return ret;
+ sends_out:
+       vfree(ic->i_sends);
+ ack_dma_out:
+       ib_dma_free_coherent(dev, sizeof(struct rds_header),
+                            ic->i_ack, ic->i_ack_dma);
+ recv_hdrs_dma_out:
+       ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
+                                       sizeof(struct rds_header),
+                                       ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
+ send_hdrs_dma_out:
+       ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
+                                       sizeof(struct rds_header),
+                                       ic->i_send_hdrs, ic->i_send_hdrs_dma);
+ qp_out:
+       rdma_destroy_qp(ic->i_cm_id);
+ recv_cq_out:
+       if (!ib_destroy_cq(ic->i_recv_cq))
+               ic->i_recv_cq = NULL;
+ send_cq_out:
+       if (!ib_destroy_cq(ic->i_send_cq))
+               ic->i_send_cq = NULL;
+ rds_ibdev_out:
+       rds_ib_remove_conn(rds_ibdev, conn);
        rds_ib_dev_put(rds_ibdev);
        return ret;
  }
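
The error path above replaces a single catch-all out: label with one label per acquired resource, placed in reverse order of acquisition, so each failure point releases exactly what was set up before it and nothing more. The bare skeleton of the pattern (grab_*/put_* are placeholder helpers, not kernel functions):

	static int setup_everything(void)
	{
		int ret;

		ret = grab_a();
		if (ret)
			return ret;
		ret = grab_b();
		if (ret)
			goto err_a;
		ret = grab_c();
		if (ret)
			goto err_b;
		return 0;		/* success: keep everything */

	err_b:
		put_b();		/* undo in reverse order */
	err_a:
		put_a();
		return ret;
	}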
  
@@@ -677,8 -702,9 +702,8 @@@ int rds_ib_cm_handle_connect(struct rdm
                event->param.conn.initiator_depth);
  
        /* rdma_accept() calls rdma_reject() internally if it fails */
 -      err = rdma_accept(cm_id, &conn_param);
 -      if (err)
 -              rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
 +      if (rdma_accept(cm_id, &conn_param))
 +              rds_ib_conn_error(conn, "rdma_accept failed\n");
  
  out:
        if (conn)
diff --combined net/sctp/socket.c
index 24e28cfb542b61c08061bfa63cb1b6b8820cd68f,0f378ea2ae38828d75dc215abdbc258e75cec431..72cc3ecf6516da8776a6167396f3dc659308030a
        return retval;
  }
  
 +static int sctp_setsockopt_reconfig_supported(struct sock *sk,
 +                                            char __user *optval,
 +                                            unsigned int optlen)
 +{
 +      struct sctp_assoc_value params;
 +      struct sctp_association *asoc;
 +      int retval = -EINVAL;
 +
 +      if (optlen != sizeof(params))
 +              goto out;
 +
 +      if (copy_from_user(&params, optval, optlen)) {
 +              retval = -EFAULT;
 +              goto out;
 +      }
 +
 +      asoc = sctp_id2assoc(sk, params.assoc_id);
 +      if (asoc) {
 +              asoc->reconf_enable = !!params.assoc_value;
 +      } else if (!params.assoc_id) {
 +              struct sctp_sock *sp = sctp_sk(sk);
 +
 +              sp->ep->reconf_enable = !!params.assoc_value;
 +      } else {
 +              goto out;
 +      }
 +
 +      retval = 0;
 +
 +out:
 +      return retval;
 +}
 +
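From userspace the new option follows the usual struct sctp_assoc_value convention: assoc_id picks one association, 0 updates the endpoint default that future associations inherit. A usage sketch, assuming a kernel and libc headers that already export SCTP_RECONFIG_SUPPORTED:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	static int enable_reconf(int fd)
	{
		struct sctp_assoc_value av = {
			.assoc_id    = 0,	/* endpoint-wide default */
			.assoc_value = 1,	/* enable RE-CONFIG */
		};

		return setsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED,
				  &av, sizeof(av));
	}
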
  static int sctp_setsockopt_enable_strreset(struct sock *sk,
                                           char __user *optval,
                                           unsigned int optlen)
@@@ -4071,9 -4038,6 +4071,9 @@@ static int sctp_setsockopt(struct sock 
        case SCTP_DEFAULT_PRINFO:
                retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
                break;
 +      case SCTP_RECONFIG_SUPPORTED:
 +              retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
 +              break;
        case SCTP_ENABLE_STREAM_RESET:
                retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
                break;
@@@ -4152,7 -4116,7 +4152,7 @@@ static int sctp_disconnect(struct sock 
   * descriptor will be returned from accept() to represent the newly
   * formed association.
   */
- static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
+ static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
  {
        struct sctp_sock *sp;
        struct sctp_endpoint *ep;
         */
        asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
  
-       newsk = sp->pf->create_accept_sk(sk, asoc);
+       newsk = sp->pf->create_accept_sk(sk, asoc, kern);
        if (!newsk) {
                error = -ENOMEM;
                goto out;
        return retval;
  }
  
 +static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
 +                                            char __user *optval,
 +                                            int __user *optlen)
 +{
 +      struct sctp_assoc_value params;
 +      struct sctp_association *asoc;
 +      int retval = -EFAULT;
 +
 +      if (len < sizeof(params)) {
 +              retval = -EINVAL;
 +              goto out;
 +      }
 +
 +      len = sizeof(params);
 +      if (copy_from_user(&params, optval, len))
 +              goto out;
 +
 +      asoc = sctp_id2assoc(sk, params.assoc_id);
 +      if (asoc) {
 +              params.assoc_value = asoc->reconf_enable;
 +      } else if (!params.assoc_id) {
 +              struct sctp_sock *sp = sctp_sk(sk);
 +
 +              params.assoc_value = sp->ep->reconf_enable;
 +      } else {
 +              retval = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (put_user(len, optlen))
 +              goto out;
 +
 +      if (copy_to_user(optval, &params, len))
 +              goto out;
 +
 +      retval = 0;
 +
 +out:
 +      return retval;
 +}
 +
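The getsockopt mirror copies the same structure both ways: in to select the association, out to report the flag. A matching userspace sketch, under the same header assumptions as the setsockopt example above:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	static int reconf_enabled(int fd, sctp_assoc_t id)
	{
		struct sctp_assoc_value av = { .assoc_id = id };
		socklen_t len = sizeof(av);

		if (getsockopt(fd, IPPROTO_SCTP, SCTP_RECONFIG_SUPPORTED,
			       &av, &len) < 0)
			return -1;	/* errno set by the kernel */

		return av.assoc_value;	/* 1 = enabled, 0 = disabled */
	}
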
  static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
                                           char __user *optval,
                                           int __user *optlen)
@@@ -6825,10 -6748,6 +6825,10 @@@ static int sctp_getsockopt(struct sock 
                retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
                                                        optlen);
                break;
 +      case SCTP_RECONFIG_SUPPORTED:
 +              retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
 +                                                          optlen);
 +              break;
        case SCTP_ENABLE_STREAM_RESET:
                retval = sctp_getsockopt_enable_strreset(sk, len, optval,
                                                         optlen);