]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - drivers/scsi/lpfc/lpfc_init.c
scsi: lpfc: Fix 16gb hbas failing cq create.
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / lpfc / lpfc_init.c
index 2b7ea7e53e12fbaeb9824439c533b311f1cfc9fb..e10808a50963878861c66e9fdb002cec683b878d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -731,7 +731,9 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
             !(phba->lmt & LMT_16Gb)) ||
            ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
-            !(phba->lmt & LMT_32Gb))) {
+            !(phba->lmt & LMT_32Gb)) ||
+           ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
+            !(phba->lmt & LMT_64Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board:%d "
@@ -958,6 +960,7 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
        struct lpfc_sli_ring *pring;
        LIST_HEAD(completions);
        int i;
+       struct lpfc_iocbq *piocb, *next_iocb;
 
        if (phba->sli_rev != LPFC_SLI_REV4) {
                for (i = 0; i < psli->num_rings; i++) {
@@ -983,6 +986,9 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
                if (!pring)
                        continue;
                spin_lock_irq(&pring->ring_lock);
+               list_for_each_entry_safe(piocb, next_iocb,
+                                        &pring->txcmplq, list)
+                       piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&pring->ring_lock);
@@ -1034,6 +1040,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        LIST_HEAD(nvmet_aborts);
        unsigned long iflag = 0;
        struct lpfc_sglq *sglq_entry = NULL;
+       int cnt;
 
 
        lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1097,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               cnt = 0;
                list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
                        psb->pCmd = NULL;
                        psb->status = IOSTAT_SUCCESS;
+                       cnt++;
                }
                spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+               phba->put_nvme_bufs += cnt;
                list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
                spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
 
@@ -1753,7 +1763,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
        int rc;
        uint32_t intr_mode;
 
-       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
            LPFC_SLI_INTF_IF_TYPE_2) {
                /*
                 * On error status condition, driver need to wait for port
@@ -1884,6 +1894,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
                break;
 
        case LPFC_SLI_INTF_IF_TYPE_2:
+       case LPFC_SLI_INTF_IF_TYPE_6:
                pci_rd_rc1 = lpfc_readl(
                                phba->sli4_hba.u.if_type2.STATUSregaddr,
                                &portstat_reg.word0);
@@ -2265,7 +2276,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
                && descp && descp[0] != '\0')
                return;
 
-       if (phba->lmt & LMT_32Gb)
+       if (phba->lmt & LMT_64Gb)
+               max_speed = 64;
+       else if (phba->lmt & LMT_32Gb)
                max_speed = 32;
        else if (phba->lmt & LMT_16Gb)
                max_speed = 16;
@@ -2464,6 +2477,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        case PCI_DEVICE_ID_LANCER_G6_FC:
                m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
                break;
+       case PCI_DEVICE_ID_LANCER_G7_FC:
+               m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
+               break;
        case PCI_DEVICE_ID_SKYHAWK:
        case PCI_DEVICE_ID_SKYHAWK_VF:
                oneConnect = 1;
@@ -3339,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_put, list) {
                list_del(&lpfc_ncmd->list);
+               phba->put_nvme_bufs--;
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);
@@ -3350,6 +3367,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &phba->lpfc_nvme_buf_list_get, list) {
                list_del(&lpfc_ncmd->list);
+               phba->get_nvme_bufs--;
                dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);
@@ -3754,9 +3772,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        uint16_t i, lxri, els_xri_cnt;
        uint16_t nvme_xri_cnt, nvme_xri_max;
        LIST_HEAD(nvme_sgl_list);
-       int rc;
+       int rc, cnt;
 
        phba->total_nvme_bufs = 0;
+       phba->get_nvme_bufs = 0;
+       phba->put_nvme_bufs = 0;
 
        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
                return 0;
@@ -3780,6 +3800,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        spin_lock(&phba->nvme_buf_list_put_lock);
        list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
        list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+       cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+       phba->get_nvme_bufs = 0;
+       phba->put_nvme_bufs = 0;
        spin_unlock(&phba->nvme_buf_list_put_lock);
        spin_unlock_irq(&phba->nvme_buf_list_get_lock);
 
@@ -3824,6 +3847,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
        spin_lock_irq(&phba->nvme_buf_list_get_lock);
        spin_lock(&phba->nvme_buf_list_put_lock);
        list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+       phba->get_nvme_bufs = cnt;
        INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
        spin_unlock(&phba->nvme_buf_list_put_lock);
        spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -4092,6 +4116,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
                                 sizeof fc_host_symbolic_name(shost));
 
        fc_host_supported_speeds(shost) = 0;
+       if (phba->lmt & LMT_64Gb)
+               fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
        if (phba->lmt & LMT_32Gb)
                fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
        if (phba->lmt & LMT_16Gb)
@@ -4428,6 +4454,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
                case LPFC_FC_LA_SPEED_32G:
                        port_speed = 32000;
                        break;
+               case LPFC_FC_LA_SPEED_64G:
+                       port_speed = 64000;
+                       break;
                default:
                        port_speed = 0;
                }
@@ -5609,8 +5638,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
                /* Initialize the NVME buffer list used by driver for NVME IO */
                spin_lock_init(&phba->nvme_buf_list_get_lock);
                INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+               phba->get_nvme_bufs = 0;
                spin_lock_init(&phba->nvme_buf_list_put_lock);
                INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+               phba->put_nvme_bufs = 0;
        }
 
        /* Initialize the fabric iocb list */
@@ -5806,6 +5837,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        struct lpfc_mqe *mqe;
        int longs;
        int fof_vectors = 0;
+       int extra;
        uint64_t wwn;
 
        phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5859,14 +5891,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * The WQ create will allocate the ring.
         */
 
+       /*
+        * 1 for cmd, 1 for rsp, NVME adds an extra one
+        * for boundary conditions in its max_sgl_segment template.
+        */
+       extra = 2;
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+               extra++;
+
        /*
         * It doesn't matter what family our adapter is in, we are
         * limited to 2 Pages, 512 SGEs, for our SGL.
         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
         */
        max_buf_size = (2 * SLI4_PAGE_SIZE);
-       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
 
        /*
         * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5939,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                 */
                phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
                                sizeof(struct fcp_rsp) +
-                               ((phba->cfg_sg_seg_cnt + 2) *
+                               ((phba->cfg_sg_seg_cnt + extra) *
                                sizeof(struct sli4_sge));
 
                /* Total SGEs for scsi_sg_list */
-               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
 
                /*
-                * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+                * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
                 * need to post 1 page for the SGL.
                 */
        }
@@ -5947,9 +5987,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
-               /* Fast-path XRI aborted CQ Event work queue list */
-               INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }
 
        /* This abort list used by worker thread */
@@ -5994,7 +6031,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                return -ENOMEM;
 
        /* IF Type 2 ports get initialized now. */
-       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
            LPFC_SLI_INTF_IF_TYPE_2) {
                rc = lpfc_pci_function_reset(phba);
                if (unlikely(rc)) {
@@ -7324,6 +7361,7 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
                        }
                        break;
                case LPFC_SLI_INTF_IF_TYPE_2:
+               case LPFC_SLI_INTF_IF_TYPE_6:
                        /* Final checks.  The port status should be clean. */
                        if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                                &reg_data.word0) ||
@@ -7406,13 +7444,36 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
                phba->sli4_hba.WQDBregaddr =
                        phba->sli4_hba.conf_regs_memmap_p +
                                                LPFC_ULP0_WQ_DOORBELL;
-               phba->sli4_hba.EQCQDBregaddr =
+               phba->sli4_hba.CQDBregaddr =
                        phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
+               phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
                phba->sli4_hba.MQDBregaddr =
                        phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
                phba->sli4_hba.BMBXregaddr =
                        phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
                break;
+       case LPFC_SLI_INTF_IF_TYPE_6:
+               phba->sli4_hba.u.if_type2.EQDregaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_EQ_DELAY_OFFSET;
+               phba->sli4_hba.u.if_type2.ERR1regaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_ER1_OFFSET;
+               phba->sli4_hba.u.if_type2.ERR2regaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_ER2_OFFSET;
+               phba->sli4_hba.u.if_type2.CTRLregaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_CTL_OFFSET;
+               phba->sli4_hba.u.if_type2.STATUSregaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_STA_OFFSET;
+               phba->sli4_hba.PSMPHRregaddr =
+                       phba->sli4_hba.conf_regs_memmap_p +
+                                               LPFC_CTL_PORT_SEM_OFFSET;
+               phba->sli4_hba.BMBXregaddr =
+                       phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+               break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                dev_printk(KERN_ERR, &phba->pcidev->dev,
@@ -7426,20 +7487,43 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
- * memory map.
+ * This routine is invoked to set up SLI4 BAR1 register memory map.
  **/
 static void
-lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 {
-       phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-               LPFC_SLIPORT_IF0_SMPHR;
-       phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-               LPFC_HST_ISR0;
-       phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-               LPFC_HST_IMR0;
-       phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-               LPFC_HST_ISCR0;
+       switch (if_type) {
+       case LPFC_SLI_INTF_IF_TYPE_0:
+               phba->sli4_hba.PSMPHRregaddr =
+                       phba->sli4_hba.ctrl_regs_memmap_p +
+                       LPFC_SLIPORT_IF0_SMPHR;
+               phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                       LPFC_HST_ISR0;
+               phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                       LPFC_HST_IMR0;
+               phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                       LPFC_HST_ISCR0;
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_6:
+               phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+                       LPFC_IF6_RQ_DOORBELL;
+               phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+                       LPFC_IF6_WQ_DOORBELL;
+               phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+                       LPFC_IF6_CQ_DOORBELL;
+               phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+                       LPFC_IF6_EQ_DOORBELL;
+               phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+                       LPFC_IF6_MQ_DOORBELL;
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_2:
+       case LPFC_SLI_INTF_IF_TYPE_1:
+       default:
+               dev_err(&phba->pcidev->dev,
+                          "FATAL - unsupported SLI4 interface type - %d\n",
+                          if_type);
+               break;
+       }
 }
 
 /**
@@ -7464,8 +7548,10 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
        phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
                                vf * LPFC_VFR_PAGE_SIZE +
                                        LPFC_ULP0_WQ_DOORBELL);
-       phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
-                               vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+       phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE +
+                                       LPFC_EQCQ_DOORBELL);
+       phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
        phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
                                vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
        phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -7702,7 +7788,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 
        /* Update link speed if forced link speed is supported */
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
-       if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+       if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                forced_link_speed =
                        bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
                if (forced_link_speed) {
@@ -7737,6 +7823,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                                phba->cfg_link_speed =
                                        LPFC_USER_LINK_SPEED_32G;
                                break;
+                       case LINK_SPEED_64G:
+                               phba->cfg_link_speed =
+                                       LPFC_USER_LINK_SPEED_64G;
+                               break;
                        case 0xffff:
                                phba->cfg_link_speed =
                                        LPFC_USER_LINK_SPEED_AUTO;
@@ -7762,7 +7852,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->cfg_hba_queue_depth = length;
        }
 
-       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
            LPFC_SLI_INTF_IF_TYPE_2)
                goto read_cfg_out;
 
@@ -7876,6 +7966,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
                }
                mempool_free(mboxq, phba->mbox_mem_pool);
                break;
+       case LPFC_SLI_INTF_IF_TYPE_6:
        case LPFC_SLI_INTF_IF_TYPE_2:
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
@@ -7936,8 +8027,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
                phba->cfg_fcp_io_channel = io_channel;
        if (phba->cfg_nvme_io_channel > io_channel)
                phba->cfg_nvme_io_channel = io_channel;
-       if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-               phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+       if (phba->nvmet_support) {
+               if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+                       phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+       }
+       if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,20 +8053,21 @@ static int
 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
        struct lpfc_queue *qdesc;
-       int cnt;
 
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                           phba->sli4_hba.cq_ecount);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
+                                     LPFC_CQE_EXP_COUNT);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0508 Failed allocate fast-path NVME CQ (%d)\n",
                                wqidx);
                return 1;
        }
+       qdesc->qe_valid = 1;
        phba->sli4_hba.nvme_cq[wqidx] = qdesc;
 
-       cnt = LPFC_NVME_WQSIZE;
-       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+                                     LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7990,19 +8086,37 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
        uint32_t wqesize;
 
        /* Create Fast Path FCP CQs */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-                                       phba->sli4_hba.cq_ecount);
+       if (phba->enab_exp_wqcq_pages)
+               /* Increase the CQ size when WQEs contain an embedded cdb */
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+                                             phba->sli4_hba.cq_esize,
+                                             LPFC_CQE_EXP_COUNT);
+
+       else
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.cq_esize,
+                                             phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
                return 1;
        }
+       qdesc->qe_valid = 1;
        phba->sli4_hba.fcp_cq[wqidx] = qdesc;
 
        /* Create Fast Path FCP WQs */
-       wqesize = (phba->fcp_embed_io) ?
-               LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-       qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+       if (phba->enab_exp_wqcq_pages) {
+               /* Increase the WQ size when WQEs contain an embedded cdb */
+               wqesize = (phba->fcp_embed_io) ?
+                       LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+                                             wqesize,
+                                             LPFC_WQE_EXP_COUNT);
+       } else
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.wq_esize,
+                                             phba->sli4_hba.wq_ecount);
+
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,13 +8287,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        /* Create HBA Event Queues (EQs) */
        for (idx = 0; idx < io_channel; idx++) {
                /* Create EQs */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.eq_esize,
                                              phba->sli4_hba.eq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0497 Failed allocate EQ (%d)\n", idx);
                        goto out_error;
                }
+               qdesc->qe_valid = 1;
                phba->sli4_hba.hba_eq[idx] = qdesc;
        }
 
@@ -8196,14 +8312,16 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        if (phba->nvmet_support) {
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        qdesc = lpfc_sli4_queue_alloc(phba,
-                                       phba->sli4_hba.cq_esize,
-                                       phba->sli4_hba.cq_ecount);
+                                                     LPFC_DEFAULT_PAGE_SIZE,
+                                                     phba->sli4_hba.cq_esize,
+                                                     phba->sli4_hba.cq_ecount);
                        if (!qdesc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "3142 Failed allocate NVME "
                                        "CQ Set (%d)\n", idx);
                                goto out_error;
                        }
+                       qdesc->qe_valid = 1;
                        phba->sli4_hba.nvmet_cqset[idx] = qdesc;
                }
        }
@@ -8213,23 +8331,27 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
         */
 
        /* Create slow-path Mailbox Command Complete Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0500 Failed allocate slow-path mailbox CQ\n");
                goto out_error;
        }
+       qdesc->qe_valid = 1;
        phba->sli4_hba.mbx_cq = qdesc;
 
        /* Create slow-path ELS Complete Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.cq_esize,
                                      phba->sli4_hba.cq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0501 Failed allocate slow-path ELS CQ\n");
                goto out_error;
        }
+       qdesc->qe_valid = 1;
        phba->sli4_hba.els_cq = qdesc;
 
 
@@ -8239,7 +8361,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
        /* Create Mailbox Command Queue */
 
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.mq_esize,
                                      phba->sli4_hba.mq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8376,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
         */
 
        /* Create slow-path ELS Work Queue */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.wq_esize,
                                      phba->sli4_hba.wq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,17 +8389,20 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                /* Create NVME LS Complete Queue */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.cq_esize,
                                              phba->sli4_hba.cq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "6079 Failed allocate NVME LS CQ\n");
                        goto out_error;
                }
+               qdesc->qe_valid = 1;
                phba->sli4_hba.nvmels_cq = qdesc;
 
                /* Create NVME LS Work Queue */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+               qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                             phba->sli4_hba.wq_esize,
                                              phba->sli4_hba.wq_ecount);
                if (!qdesc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8418,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
         */
 
        /* Create Receive Queue for header */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.rq_esize,
                                      phba->sli4_hba.rq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8429,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
        phba->sli4_hba.hdr_rq = qdesc;
 
        /* Create Receive Queue for data */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.rq_esize,
                                      phba->sli4_hba.rq_ecount);
        if (!qdesc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8443,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
                        /* Create NVMET Receive Queue for header */
                        qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
                                                      LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
@@ -8339,6 +8469,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
                        /* Create NVMET Receive Queue for data */
                        qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
                                                      phba->sli4_hba.rq_esize,
                                                      LPFC_NVMET_RQE_DEF_COUNT);
                        if (!qdesc) {
@@ -8437,13 +8568,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
        /* Release NVME CQ mapping array */
        lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
 
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
-                                       phba->cfg_nvmet_mrq);
+       if (phba->nvmet_support) {
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+                                        phba->cfg_nvmet_mrq);
 
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
-                                       phba->cfg_nvmet_mrq);
-       lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
-                                       phba->cfg_nvmet_mrq);
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+                                        phba->cfg_nvmet_mrq);
+               lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+                                        phba->cfg_nvmet_mrq);
+       }
 
        /* Release mailbox command work queue */
        __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8647,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        qidx, (uint32_t)rc);
                return rc;
        }
+       cq->chann = qidx;
 
        if (qtype != LPFC_MBOX) {
                /* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8667,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        /* no need to tear down cq - caller will do so */
                        return rc;
                }
+               wq->chann = qidx;
 
                /* Bind this CQ/WQ to the NVME ring */
                pring = wq->pring;
@@ -8773,6 +8908,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
                                                "rc = 0x%x\n", (uint32_t)rc);
                                goto out_destroy;
                        }
+                       phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "6090 NVMET CQ setup: cq-id=%d, "
                                        "parent eq-id=%d\n",
@@ -8994,19 +9131,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
                for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
                        lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
 
-       /* Unset NVMET MRQ queue */
-       if (phba->sli4_hba.nvmet_mrq_hdr) {
-               for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-                       lpfc_rq_destroy(phba,
+       if (phba->nvmet_support) {
+               /* Unset NVMET MRQ queue */
+               if (phba->sli4_hba.nvmet_mrq_hdr) {
+                       for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+                               lpfc_rq_destroy(
+                                       phba,
                                        phba->sli4_hba.nvmet_mrq_hdr[qidx],
                                        phba->sli4_hba.nvmet_mrq_data[qidx]);
-       }
+               }
 
-       /* Unset NVMET CQ Set complete queue */
-       if (phba->sli4_hba.nvmet_cqset) {
-               for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-                       lpfc_cq_destroy(phba,
-                                       phba->sli4_hba.nvmet_cqset[qidx]);
+               /* Unset NVMET CQ Set complete queue */
+               if (phba->sli4_hba.nvmet_cqset) {
+                       for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+                               lpfc_cq_destroy(
+                                       phba, phba->sli4_hba.nvmet_cqset[qidx]);
+               }
        }
 
        /* Unset FCP response complete queue */
@@ -9175,11 +9315,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
        /* Pending ELS XRI abort events */
        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
                         &cqelist);
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               /* Pending NVME XRI abort events */
-               list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
-                                &cqelist);
-       }
        /* Pending asynnc events */
        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
                         &cqelist);
@@ -9250,6 +9385,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                }
                break;
        case LPFC_SLI_INTF_IF_TYPE_2:
+       case LPFC_SLI_INTF_IF_TYPE_6:
 wait:
                /*
                 * Poll the Port Status Register and wait for RDY for
@@ -9405,7 +9541,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
        } else {
                phba->pci_bar0_map = pci_resource_start(pdev, 1);
                bar0map_len = pci_resource_len(pdev, 1);
-               if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+               if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
                        dev_printk(KERN_ERR, &pdev->dev,
                           "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
                        goto out;
@@ -9421,44 +9557,116 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
                lpfc_sli4_bar0_register_memmap(phba, if_type);
        }
 
-       if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+       if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+               if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+                       /*
+                        * Map SLI4 if type 0 HBA Control Register base to a
+                        * kernel virtual address and setup the registers.
+                        */
+                       phba->pci_bar1_map = pci_resource_start(pdev,
+                                                               PCI_64BIT_BAR2);
+                       bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+                       phba->sli4_hba.ctrl_regs_memmap_p =
+                                       ioremap(phba->pci_bar1_map,
+                                               bar1map_len);
+                       if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+                               dev_err(&pdev->dev,
+                                          "ioremap failed for SLI4 HBA "
+                                           "control registers.\n");
+                               error = -ENOMEM;
+                               goto out_iounmap_conf;
+                       }
+                       phba->pci_bar2_memmap_p =
+                                        phba->sli4_hba.ctrl_regs_memmap_p;
+                       lpfc_sli4_bar1_register_memmap(phba, if_type);
+               } else {
+                       error = -ENOMEM;
+                       goto out_iounmap_conf;
+               }
+       }
+
+       if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
            (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
                /*
-                * Map SLI4 if type 0 HBA Control Register base to a kernel
+                * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
                 * virtual address and setup the registers.
                 */
                phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
                bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-               phba->sli4_hba.ctrl_regs_memmap_p =
+               phba->sli4_hba.drbl_regs_memmap_p =
                                ioremap(phba->pci_bar1_map, bar1map_len);
-               if (!phba->sli4_hba.ctrl_regs_memmap_p) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                          "ioremap failed for SLI4 HBA control registers.\n");
+               if (!phba->sli4_hba.drbl_regs_memmap_p) {
+                       dev_err(&pdev->dev,
+                          "ioremap failed for SLI4 HBA doorbell registers.\n");
                        goto out_iounmap_conf;
                }
-               phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
-               lpfc_sli4_bar1_register_memmap(phba);
+               phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
+               lpfc_sli4_bar1_register_memmap(phba, if_type);
        }
 
-       if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-           (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
+       if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+               if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+                       /*
+                        * Map SLI4 if type 0 HBA Doorbell Register base to
+                        * a kernel virtual address and setup the registers.
+                        */
+                       phba->pci_bar2_map = pci_resource_start(pdev,
+                                                               PCI_64BIT_BAR4);
+                       bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+                       phba->sli4_hba.drbl_regs_memmap_p =
+                                       ioremap(phba->pci_bar2_map,
+                                               bar2map_len);
+                       if (!phba->sli4_hba.drbl_regs_memmap_p) {
+                               dev_err(&pdev->dev,
+                                          "ioremap failed for SLI4 HBA"
+                                          " doorbell registers.\n");
+                               error = -ENOMEM;
+                               goto out_iounmap_ctrl;
+                       }
+                       phba->pci_bar4_memmap_p =
+                                       phba->sli4_hba.drbl_regs_memmap_p;
+                       error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+                       if (error)
+                               goto out_iounmap_all;
+               } else {
+                       error = -ENOMEM;
+                       goto out_iounmap_all;
+               }
+       }
+
+       if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
+           pci_resource_start(pdev, PCI_64BIT_BAR4)) {
                /*
-                * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
+                * Map SLI4 if type 6 HBA DPP Register base to a kernel
                 * virtual address and setup the registers.
                 */
                phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
                bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-               phba->sli4_hba.drbl_regs_memmap_p =
+               phba->sli4_hba.dpp_regs_memmap_p =
                                ioremap(phba->pci_bar2_map, bar2map_len);
-               if (!phba->sli4_hba.drbl_regs_memmap_p) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                          "ioremap failed for SLI4 HBA doorbell registers.\n");
+               if (!phba->sli4_hba.dpp_regs_memmap_p) {
+                       dev_err(&pdev->dev,
+                          "ioremap failed for SLI4 HBA dpp registers.\n");
                        goto out_iounmap_ctrl;
                }
-               phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
-               error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
-               if (error)
-                       goto out_iounmap_all;
+               phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
+       }
+
+       /* Set up the EQ/CQ register handling functions now */
+       switch (if_type) {
+       case LPFC_SLI_INTF_IF_TYPE_0:
+       case LPFC_SLI_INTF_IF_TYPE_2:
+               phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
+               phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
+               phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
+               break;
+       case LPFC_SLI_INTF_IF_TYPE_6:
+               phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
+               phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
+               phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
+               break;
+       default:
+               break;
        }
 
        return 0;
@@ -9495,6 +9703,10 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
        case LPFC_SLI_INTF_IF_TYPE_2:
                iounmap(phba->sli4_hba.conf_regs_memmap_p);
                break;
+       case LPFC_SLI_INTF_IF_TYPE_6:
+               iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+               iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                dev_printk(KERN_ERR, &phba->pcidev->dev,
@@ -10093,6 +10305,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
        int fcp_xri_cmpl = 1;
        int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
+       /* Driver just aborted IOs during the hba_unset process.  Pause
+        * here to give the HBA time to complete the IO and get entries
+        * into the abts lists.
+        */
+       msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+       /* Wait for NVME pending IO to flush back to transport. */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+               lpfc_nvme_wait_for_io_drain(phba);
+
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
                fcp_xri_cmpl =
                        list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10311,6 +10533,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        struct lpfc_pc_sli4_params *sli4_params;
        uint32_t mbox_tmo;
        int length;
+       bool exp_wqcq_pages = true;
        struct lpfc_sli4_parameters *mbx_sli4_parameters;
 
        /*
@@ -10354,6 +10577,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
        sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
        sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+       sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
+       sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
        sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
        sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
                                            mbx_sli4_parameters);
@@ -10369,7 +10594,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
            !phba->nvme_support) {
                phba->nvme_support = 0;
                phba->nvmet_support = 0;
-               phba->cfg_nvmet_mrq = 0;
+               phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
                phba->cfg_nvme_io_channel = 0;
                phba->io_channel_irqs = phba->cfg_fcp_io_channel;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -10384,8 +10609,32 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
        }
 
-       if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
+       /* Only embed PBDE for if_type 6 */
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+           LPFC_SLI_INTF_IF_TYPE_6) {
+               phba->fcp_embed_pbde = 1;
+               phba->nvme_embed_pbde = 1;
+       }
+
+       /* PBDE support requires xib be set */
+       if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
+               phba->fcp_embed_pbde = 0;
+               phba->nvme_embed_pbde = 0;
+       }
+
+       /*
+        * To support Suppress Response feature we must satisfy 3 conditions.
+        * lpfc_suppress_rsp module parameter must be set (default).
+        * In SLI4-Parameters Descriptor:
+        * Extended Inline Buffers (XIB) must be supported.
+        * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
+        * (double negative).
+        */
+       if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
+           !(bf_get(cfg_nosr, mbx_sli4_parameters)))
                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+       else
+               phba->cfg_suppress_rsp = 0;
 
        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
@@ -10395,15 +10644,35 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
        /*
-        * Issue IOs with CDB embedded in WQE to minimized the number
-        * of DMAs the firmware has to do. Setting this to 1 also forces
-        * the driver to use 128 bytes WQEs for FCP IOs.
+        * Check whether the adapter supports an embedded copy of the
+        * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
+        * to use this option, 128-byte WQEs must be used.
         */
        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
                phba->fcp_embed_io = 1;
        else
                phba->fcp_embed_io = 0;
 
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
+                       "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
+                       bf_get(cfg_xib, mbx_sli4_parameters),
+                       phba->fcp_embed_pbde, phba->fcp_embed_io,
+                       phba->nvme_support, phba->nvme_embed_pbde,
+                       phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
+
+       if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+           LPFC_SLI_INTF_IF_TYPE_2) &&
+           (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+                LPFC_SLI_INTF_FAMILY_LNCR_A0))
+               exp_wqcq_pages = false;
+
+       if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
+           (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
+           exp_wqcq_pages &&
+           (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
+               phba->enab_exp_wqcq_pages = 1;
+       else
+               phba->enab_exp_wqcq_pages = 0;
        /*
         * Check if the SLI port supports MDS Diagnostics
         */
@@ -11056,6 +11325,27 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
 }
 
 
+static void
+lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
+       uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
+       const struct firmware *fw)
+{
+       if (offset == ADD_STATUS_FW_NOT_SUPPORTED)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3030 This firmware version is not supported on "
+                       "this HBA model. Device:%x Magic:%x Type:%x "
+                       "ID:%x Size %d %zd\n",
+                       phba->pcidev->device, magic_number, ftype, fid,
+                       fsize, fw->size);
+       else
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3022 FW Download failed. Device:%x Magic:%x Type:%x "
+                       "ID:%x Size %d %zd\n",
+                       phba->pcidev->device, magic_number, ftype, fid,
+                       fsize, fw->size);
+}
+
+
 /**
  * lpfc_write_firmware - attempt to write a firmware image to the port
  * @fw: pointer to firmware image returned from request_firmware.
@@ -11083,20 +11373,10 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
 
        magic_number = be32_to_cpu(image->magic_number);
        ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
-       fid = bf_get_be32(lpfc_grp_hdr_id, image),
+       fid = bf_get_be32(lpfc_grp_hdr_id, image);
        fsize = be32_to_cpu(image->size);
 
        INIT_LIST_HEAD(&dma_buffer_list);
-       if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
-            magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
-           ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "3022 Invalid FW image found. "
-                               "Magic:%x Type:%x ID:%x Size %d %zd\n",
-                               magic_number, ftype, fid, fsize, fw->size);
-               rc = -EINVAL;
-               goto release_out;
-       }
        lpfc_decode_firmware_rev(phba, fwrev, 1);
        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11137,11 +11417,18 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
                        }
                        rc = lpfc_wr_object(phba, &dma_buffer_list,
                                    (fw->size - offset), &offset);
-                       if (rc)
+                       if (rc) {
+                               lpfc_log_write_firmware_error(phba, offset,
+                                       magic_number, ftype, fid, fsize, fw);
                                goto release_out;
+                       }
                }
                rc = offset;
-       }
+       } else
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3029 Skipped Firmware update, Current "
+                               "Version:%s New Version:%s\n",
+                               fwrev, image->revision);
 
 release_out:
        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
@@ -11172,7 +11459,7 @@ lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
        const struct firmware *fw;
 
        /* Only supported on SLI4 interface type 2 for now */
-       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+       if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
            LPFC_SLI_INTF_IF_TYPE_2)
                return -EPERM;
 
@@ -11412,13 +11699,6 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
        /* Remove FC host and then SCSI host with the physical port */
        fc_remove_host(shost);
        scsi_remove_host(shost);
-       /*
-        * Bring down the SLI Layer. This step disables all interrupts,
-        * clears the rings, discards all mailbox commands, and resets
-        * the HBA FCoE function.
-        */
-       lpfc_debugfs_terminate(vport);
-       lpfc_sli4_hba_unset(phba);
 
        /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
         * localports are destroyed after to cleanup all transport memory.
@@ -11427,6 +11707,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
        lpfc_nvmet_destroy_targetport(phba);
        lpfc_nvme_destroy_localport(vport);
 
+       /*
+        * Bring down the SLI Layer. This step disables all interrupts,
+        * clears the rings, discards all mailbox commands, and resets
+        * the HBA FCoE function.
+        */
+       lpfc_debugfs_terminate(vport);
+       lpfc_sli4_hba_unset(phba);
 
        lpfc_stop_hba_timers(phba);
        spin_lock_irq(&phba->hbalock);
@@ -11616,6 +11903,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Flush all driver's outstanding SCSI I/Os as we are to reset */
        lpfc_sli_flush_fcp_rings(phba);
 
+       /* Flush the outstanding NVME IOs if fc4 type enabled. */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+               lpfc_sli_flush_nvme_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
@@ -11647,6 +11938,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 
        /* Clean up all driver's outstanding SCSI I/Os */
        lpfc_sli_flush_fcp_rings(phba);
+
+       /* Flush the outstanding NVME IOs if fc4 type enabled. */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+               lpfc_sli_flush_nvme_rings(phba);
 }
 
 /**
@@ -12141,28 +12436,47 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
        uint32_t wqesize;
 
        /* Create FOF EQ */
-       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+       qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+                                     phba->sli4_hba.eq_esize,
                                      phba->sli4_hba.eq_ecount);
        if (!qdesc)
                goto out_error;
 
+       qdesc->qe_valid = 1;
        phba->sli4_hba.fof_eq = qdesc;
 
        if (phba->cfg_fof) {
 
                /* Create OAS CQ */
-               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+               if (phba->enab_exp_wqcq_pages)
+                       qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_EXPANDED_PAGE_SIZE,
+                                                     phba->sli4_hba.cq_esize,
+                                                     LPFC_CQE_EXP_COUNT);
+               else
+                       qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
+                                                     phba->sli4_hba.cq_esize,
                                                      phba->sli4_hba.cq_ecount);
                if (!qdesc)
                        goto out_error;
 
+               qdesc->qe_valid = 1;
                phba->sli4_hba.oas_cq = qdesc;
 
                /* Create OAS WQ */
-               wqesize = (phba->fcp_embed_io) ?
+               if (phba->enab_exp_wqcq_pages) {
+                       wqesize = (phba->fcp_embed_io) ?
                                LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-               qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
-                                             phba->sli4_hba.wq_ecount);
+                       qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_EXPANDED_PAGE_SIZE,
+                                                     wqesize,
+                                                     LPFC_WQE_EXP_COUNT);
+               } else
+                       qdesc = lpfc_sli4_queue_alloc(phba,
+                                                     LPFC_DEFAULT_PAGE_SIZE,
+                                                     phba->sli4_hba.wq_esize,
+                                                     phba->sli4_hba.wq_ecount);
 
                if (!qdesc)
                        goto out_error;