/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* "Broadcom" refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
!(phba->lmt & LMT_16Gb)) ||
((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
- !(phba->lmt & LMT_32Gb))) {
+ !(phba->lmt & LMT_32Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
+ !(phba->lmt & LMT_64Gb))) {
/* Reset link speed to auto */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board:%d "
struct lpfc_sli_ring *pring;
LIST_HEAD(completions);
int i;
+ struct lpfc_iocbq *piocb, *next_iocb;
if (phba->sli_rev != LPFC_SLI_REV4) {
for (i = 0; i < psli->num_rings; i++) {
if (!pring)
continue;
spin_lock_irq(&pring->ring_lock);
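+ /* Clear the on-txcmplq flag on each iocb before splicing the txcmplq onto the local completions list */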
+ list_for_each_entry_safe(piocb, next_iocb,
+ &pring->txcmplq, list)
+ piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
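+ /* Move the aborted buffers back onto the put list and credit its counter under the put-list lock */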
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
int rc;
uint32_t intr_mode;
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
/*
* On error status condition, the driver needs to wait for port
break;
case LPFC_SLI_INTF_IF_TYPE_2:
+ case LPFC_SLI_INTF_IF_TYPE_6:
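+ /* if_type 6 reports port status through the same if_type 2 register fields */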
pci_rd_rc1 = lpfc_readl(
phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
&& descp && descp[0] != '\0')
return;
- if (phba->lmt & LMT_32Gb)
+ if (phba->lmt & LMT_64Gb)
+ max_speed = 64;
+ else if (phba->lmt & LMT_32Gb)
max_speed = 32;
else if (phba->lmt & LMT_16Gb)
max_speed = 16;
case PCI_DEVICE_ID_LANCER_G6_FC:
m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
break;
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
+ break;
case PCI_DEVICE_ID_SKYHAWK:
case PCI_DEVICE_ID_SKYHAWK_VF:
oneConnect = 1;
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
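+ /* Keep the put-list counter in step as each buffer is freed */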
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
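+ /* Capture the combined count; both counters are zeroed here and the total is restored on the get list below */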
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_64Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
if (phba->lmt & LMT_32Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
if (phba->lmt & LMT_16Gb)
case LPFC_FC_LA_SPEED_32G:
port_speed = 32000;
break;
+ case LPFC_FC_LA_SPEED_64G:
+ port_speed = 64000;
+ break;
default:
port_speed = 0;
}
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
* The WQ create will allocate the ring.
*/
+ /*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
/*
* It doesn't matter what family our adapter is in; we are
* limited to 2 pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
return -ENOMEM;
/* IF Type 2 ports get initialized now. */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2) {
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc)) {
}
break;
case LPFC_SLI_INTF_IF_TYPE_2:
+ case LPFC_SLI_INTF_IF_TYPE_6:
/* Final checks. The port status should be clean. */
if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
&reg_data.word0) ||
phba->sli4_hba.WQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p +
LPFC_ULP0_WQ_DOORBELL;
- phba->sli4_hba.EQCQDBregaddr =
+ phba->sli4_hba.CQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
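+ /* EQ and CQ share one combined doorbell on this if_type; alias the EQ pointer to it */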
+ phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
phba->sli4_hba.MQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
phba->sli4_hba.BMBXregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
break;
+ case LPFC_SLI_INTF_IF_TYPE_6:
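+ /* if_type 6 exposes its port control/status registers at fixed offsets in BAR0 */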
+ phba->sli4_hba.u.if_type2.EQDregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_EQ_DELAY_OFFSET;
+ phba->sli4_hba.u.if_type2.ERR1regaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
+ phba->sli4_hba.u.if_type2.ERR2regaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
+ phba->sli4_hba.u.if_type2.CTRLregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
+ phba->sli4_hba.u.if_type2.STATUSregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
+ phba->sli4_hba.PSMPHRregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
+ phba->sli4_hba.BMBXregaddr =
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+ break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
dev_printk(KERN_ERR, &phba->pcidev->dev,
* lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
- * memory map.
+ * This routine is invoked to set up SLI4 BAR1 register memory map.
**/
static void
-lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
- phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
- LPFC_SLIPORT_IF0_SMPHR;
- phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
- LPFC_HST_ISR0;
- phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
- LPFC_HST_IMR0;
- phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
- LPFC_HST_ISCR0;
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ phba->sli4_hba.PSMPHRregaddr =
+ phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_SLIPORT_IF0_SMPHR;
+ phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISR0;
+ phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_IMR0;
+ phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+ LPFC_HST_ISCR0;
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_6:
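+ /* if_type 6 gives each queue type its own doorbell offset within the BAR1 doorbell region */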
+ phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+ LPFC_IF6_RQ_DOORBELL;
+ phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+ LPFC_IF6_WQ_DOORBELL;
+ phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+ LPFC_IF6_CQ_DOORBELL;
+ phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+ LPFC_IF6_EQ_DOORBELL;
+ phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
+ LPFC_IF6_MQ_DOORBELL;
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_err(&phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
}
/**
phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE +
LPFC_ULP0_WQ_DOORBELL);
- phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+ phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_EQCQ_DOORBELL);
+ phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
/* Update link speed if forced link speed is supported */
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
- if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
forced_link_speed =
bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
if (forced_link_speed) {
phba->cfg_link_speed =
LPFC_USER_LINK_SPEED_32G;
break;
+ case LINK_SPEED_64G:
+ phba->cfg_link_speed =
+ LPFC_USER_LINK_SPEED_64G;
+ break;
case 0xffff:
phba->cfg_link_speed =
LPFC_USER_LINK_SPEED_AUTO;
phba->cfg_hba_queue_depth = length;
}
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)
goto read_cfg_out;
}
mempool_free(mboxq, phba->mbox_mem_pool);
break;
+ case LPFC_SLI_INTF_IF_TYPE_6:
case LPFC_SLI_INTF_IF_TYPE_2:
case LPFC_SLI_INTF_IF_TYPE_1:
default:
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
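+ /* Regardless of nvmet support, never let cfg_nvmet_mrq exceed the max */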
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
wqidx);
return 1;
}
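+ /* Seed the valid bit for queues that use the autovalid (EQAV/CQAV) entry format */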
+ qdesc->qe_valid = 1;
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->enab_exp_wqcq_pages)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
return 1;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->enab_exp_wqcq_pages) {
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ wqesize,
+ LPFC_WQE_EXP_COUNT);
+ } else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0497 Failed allocate EQ (%d)\n", idx);
goto out_error;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.hba_eq[idx] = qdesc;
}
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
goto out_error;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0500 Failed allocate slow-path mailbox CQ\n");
goto out_error;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0501 Failed allocate slow-path ELS CQ\n");
goto out_error;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.els_cq = qdesc;
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
+ qdesc->qe_valid = 1;
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
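+ /* NVMET queue sets are only allocated when nvmet is enabled */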
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
qidx, (uint32_t)rc);
return rc;
}
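+ /* Record which IO channel this CQ serves */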
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
}
break;
case LPFC_SLI_INTF_IF_TYPE_2:
+ case LPFC_SLI_INTF_IF_TYPE_6:
wait:
/*
* Poll the Port Status Register and wait for RDY for
} else {
phba->pci_bar0_map = pci_resource_start(pdev, 1);
bar0map_len = pci_resource_len(pdev, 1);
- if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
dev_printk(KERN_ERR, &pdev->dev,
"FATAL - No BAR0 mapping for SLI4, if_type 2\n");
goto out;
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and set up the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba, if_type);
+ } else {
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ }
+
+ if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
(pci_resource_start(pdev, PCI_64BIT_BAR2))) {
/*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
+ * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
* virtual address and set up the registers.
*/
phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p =
ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA doorbell registers.\n");
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
+ phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and set up the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
+ goto out_iounmap_all;
+ }
+ }
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
+ pci_resource_start(pdev, PCI_64BIT_BAR4)) {
/*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
+ * Map SLI4 if type 6 HBA DPP Register base to a kernel
* virtual address and set up the registers.
*/
phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
+ phba->sli4_hba.dpp_regs_memmap_p =
ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
+ if (!phba->sli4_hba.dpp_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA dpp registers.\n");
goto out_iounmap_ctrl;
}
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
- goto out_iounmap_all;
+ phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
+ }
+
+ /* Set up the EQ/CQ register handling functions now */
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
+ phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
+ phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_6:
+ phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
+ phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
+ phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
+ break;
+ default:
+ break;
}
return 0;
case LPFC_SLI_INTF_IF_TYPE_2:
iounmap(phba->sli4_hba.conf_regs_memmap_p);
break;
+ case LPFC_SLI_INTF_IF_TYPE_6:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
dev_printk(KERN_ERR, &phba->pcidev->dev,
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
struct lpfc_pc_sli4_params *sli4_params;
uint32_t mbox_tmo;
int length;
+ bool exp_wqcq_pages = true;
struct lpfc_sli4_parameters *mbx_sli4_parameters;
/*
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
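+ /* eqav/cqav report whether the port uses the autovalid (valid bit toggling) EQE/CQE format */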
+ sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
+ sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
}
- if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
+ /* Only embed PBDE for if_type 6 */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_6) {
+ phba->fcp_embed_pbde = 1;
+ phba->nvme_embed_pbde = 1;
+ }
+
+ /* PBDE support requires xib be set */
+ if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
+ phba->fcp_embed_pbde = 0;
+ phba->nvme_embed_pbde = 0;
+ }
+
+ /*
+ * To support the Suppress Response feature we must satisfy 3 conditions.
+ * lpfc_suppress_rsp module parameter must be set (default).
+ * In SLI4-Parameters Descriptor:
+ * Extended Inline Buffers (XIB) must be supported.
+ * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
+ * (double negative).
+ */
+ if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
+ !(bf_get(cfg_nosr, mbx_sli4_parameters)))
phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+ else
+ phba->cfg_suppress_rsp = 0;
if (bf_get(cfg_eqdr, mbx_sli4_parameters))
phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
/*
- * Issue IOs with CDB embedded in WQE to minimized the number
- * of DMAs the firmware has to do. Setting this to 1 also forces
- * the driver to use 128 bytes WQEs for FCP IOs.
+ * Check whether the adapter supports an embedded copy of the
+ * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
+ * to use this option, 128-byte WQEs must be used.
*/
if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
phba->fcp_embed_io = 1;
else
phba->fcp_embed_io = 0;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
+ "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
+ bf_get(cfg_xib, mbx_sli4_parameters),
+ phba->fcp_embed_pbde, phba->fcp_embed_io,
+ phba->nvme_support, phba->nvme_embed_pbde,
+ phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
+
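+ /* Lancer A0 parts on if_type 2 are excluded from expanded (16K) WQ/CQ pages */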
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FAMILY_LNCR_A0))
+ exp_wqcq_pages = false;
+
+ if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
+ (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
+ exp_wqcq_pages &&
+ (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
+ phba->enab_exp_wqcq_pages = 1;
+ else
+ phba->enab_exp_wqcq_pages = 0;
/*
* Check if the SLI port supports MDS Diagnostics
*/
}
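+/* Log why a firmware write failed: offset ADD_STATUS_FW_NOT_SUPPORTED means
+ * the image does not match this HBA model; anything else is a download error.
+ */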
+static void
+lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
+ uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
+ const struct firmware *fw)
+{
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3030 This firmware version is not supported on "
+ "this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 FW Download failed. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+}
+
+
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
magic_number = be32_to_cpu(image->magic_number);
ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
- fid = bf_get_be32(lpfc_grp_hdr_id, image),
+ fid = bf_get_be32(lpfc_grp_hdr_id, image);
fsize = be32_to_cpu(image->size);
INIT_LIST_HEAD(&dma_buffer_list);
- if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
- magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
- ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 Invalid FW image found. "
- "Magic:%x Type:%x ID:%x Size %d %zd\n",
- magic_number, ftype, fid, fsize, fw->size);
- rc = -EINVAL;
- goto release_out;
- }
lpfc_decode_firmware_rev(phba, fwrev, 1);
if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
}
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
- if (rc)
+ if (rc) {
+ lpfc_log_write_firmware_error(phba, offset,
+ magic_number, ftype, fid, fsize, fw);
goto release_out;
+ }
}
rc = offset;
- }
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3029 Skipped Firmware update, Current "
+ "Version:%s New Version:%s\n",
+ fwrev, image->revision);
release_out:
list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
const struct firmware *fw;
/* Only supported on SLI4 interface type 2 and newer for now */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)
return -EPERM;
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
- /*
- * Bring down the SLI Layer. This step disables all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA FCoE function.
- */
- lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
/* Perform ndlp cleanup on the physical port. The nvme and nvmet
* localports are destroyed after to cleanup all transport memory.
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
+ qdesc->qe_valid = 1;
phba->sli4_hba.fof_eq = qdesc;
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->enab_exp_wqcq_pages)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
+ qdesc->qe_valid = 1;
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
+ if (phba->enab_exp_wqcq_pages) {
+ wqesize = (phba->fcp_embed_io) ?
LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ wqesize,
+ LPFC_WQE_EXP_COUNT);
+ } else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;