LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
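The get_nvme_bufs/put_nvme_bufs counters added here simply mirror the lengths of the two buffer lists, so every splice has to carry the count along with the entries. A minimal standalone sketch of that bookkeeping pattern, outside the driver (names such as buf_pool, pool_put and pool_refill are hypothetical, not lpfc symbols):

	struct nbuf {
		struct nbuf *next;
	};

	struct buf_pool {
		struct nbuf *get_list;	/* buffers handed out from here */
		struct nbuf *put_list;	/* completed buffers parked here */
		int get_cnt;		/* mirrors phba->get_nvme_bufs */
		int put_cnt;		/* mirrors phba->put_nvme_bufs */
	};

	/* Park a completed buffer on the put list and bump its counter. */
	static void pool_put(struct buf_pool *p, struct nbuf *b)
	{
		b->next = p->put_list;
		p->put_list = b;
		p->put_cnt++;
	}

	/* Move the whole put list over to the get list; the count moves
	 * with the entries, just as the list_splice() above does.
	 */
	static void pool_refill(struct buf_pool *p)
	{
		struct nbuf *b = p->put_list;

		while (b) {
			struct nbuf *next = b->next;

			b->next = p->get_list;
			p->get_list = b;
			b = next;
		}
		p->put_list = NULL;
		p->get_cnt += p->put_cnt;
		p->put_cnt = 0;
	}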
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
* The WQ create will allocate the ring.
*/
+ /*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
/*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
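The sizing block above reserves "extra" SGEs (cmd + rsp, plus one more when NVME is enabled) on top of the configured scatter-gather count and folds that into the per-IO DMA buffer size. A small standalone example of the arithmetic; the struct sizes are stand-ins, not the real fcp_cmnd/fcp_rsp/sli4_sge layouts:

	#include <stdio.h>

	struct fake_cmnd { char b[32]; };	/* stand-in for struct fcp_cmnd */
	struct fake_rsp  { char b[48]; };	/* stand-in for struct fcp_rsp  */
	struct fake_sge  { char b[16]; };	/* stand-in for struct sli4_sge */

	int main(void)
	{
		int nvme_enabled = 1;
		int cfg_sg_seg_cnt = 128;
		/* 1 for cmd, 1 for rsp, plus 1 more when NVME is enabled */
		int extra = 2 + (nvme_enabled ? 1 : 0);
		size_t dma_buf_size = sizeof(struct fake_cmnd) +
				      sizeof(struct fake_rsp) +
				      (cfg_sg_seg_cnt + extra) *
				      sizeof(struct fake_sge);

		printf("total SGEs %d, per-IO DMA buffer %zu bytes\n",
		       cfg_sg_seg_cnt + extra, dma_buf_size);
		return 0;
	}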
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
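With this change the MRQ count is tied to the NVME channel count only when the port acts as an NVMET target, and it is always capped at the driver maximum. The same decision, reduced to a standalone helper (the names and the max value are illustrative, not driver symbols):

	/* mrq_max stands in for LPFC_NVMET_MRQ_MAX; not a driver symbol. */
	static int clamp_nvmet_mrq(int mrq, int nvme_io_channels,
				   int nvmet_support, int mrq_max)
	{
		if (nvmet_support && mrq > nvme_io_channels)
			mrq = nvme_io_channels;
		if (mrq > mrq_max)
			mrq = mrq_max;
		return mrq;
	}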
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->enab_exp_wqcq_pages)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->enab_exp_wqcq_pages) {
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ wqesize,
+ LPFC_WQE_EXP_COUNT);
+ } else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+
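When the adapter supports the larger queue pages (gated by the enab_exp_wqcq_pages check added later in this patch), the WQ/CQ pair is allocated with a bigger page size and a deeper entry count; otherwise the default page size and counts are kept. A standalone sketch of that selection; the page sizes and the expanded count are stand-ins for LPFC_DEFAULT_PAGE_SIZE, LPFC_EXPANDED_PAGE_SIZE and LPFC_WQE_EXP_COUNT, not their actual values:

	struct q_params {
		unsigned int page_size;		/* bytes per queue page */
		unsigned int entry_size;	/* bytes per WQE */
		unsigned int entry_count;	/* entries to provision */
	};

	static struct q_params pick_wq_params(int expanded_pages, int embed_io,
					      unsigned int def_esize,
					      unsigned int def_ecount)
	{
		struct q_params p;

		if (expanded_pages) {
			/* Embedded-CDB WQEs are 128 bytes, so provision a
			 * deeper ring backed by larger pages.
			 */
			p.page_size = 16 * 1024;	/* stand-in value */
			p.entry_size = embed_io ? 128 : def_esize;
			p.entry_count = 1024;		/* stand-in value */
		} else {
			p.page_size = 4 * 1024;		/* stand-in value */
			p.entry_size = def_esize;
			p.entry_count = def_ecount;
		}
		return p;
	}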
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */

list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
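The new sequence first gives the HBA a grace period, then lets the NVME transport flush its outstanding IO, and only then falls into the existing wait on the abort lists. A reduced, hedged sketch of a bounded poll-and-sleep loop in that style (function and parameter names are hypothetical; the driver's own wait uses its own intervals and logging):

	#include <stdbool.h>

	/* Poll until the caller's list reports empty, sleeping step_ms
	 * between checks and giving up after max_ms. The sleep and the
	 * emptiness check are supplied by the caller.
	 */
	static int wait_for_drain(bool (*empty)(void *arg), void *arg,
				  void (*sleep_ms)(unsigned int),
				  unsigned int step_ms, unsigned int max_ms)
	{
		unsigned int waited = 0;

		while (!empty(arg)) {
			if (waited >= max_ms)
				return -1;	/* caller logs and recovers */
			sleep_ms(step_ms);
			waited += step_ms;
		}
		return 0;
	}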
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
/*
- * Issue IOs with CDB embedded in WQE to minimized the number
- * of DMAs the firmware has to do. Setting this to 1 also forces
- * the driver to use 128 bytes WQEs for FCP IOs.
+ * Check whether the adapter supports an embedded copy of the
+ * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
+ * to use this option, 128-byte WQEs must be used.
*/
if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
phba->fcp_embed_io = 1;
else
phba->fcp_embed_io = 0;
+ if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
+ (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
+ (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
+ phba->enab_exp_wqcq_pages = 1;
+ else
+ phba->enab_exp_wqcq_pages = 0;
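Expanded WQ/CQ pages are enabled only when the firmware reports 16K page support for both queue types and 128-byte WQE support. The same gate as a tiny standalone predicate; the field and macro names below are illustrative, not the mailbox layout:

	struct sli4_caps {
		unsigned int cq_page_sizes;	/* supported CQ page sizes, bitmask */
		unsigned int wq_page_sizes;	/* supported WQ page sizes, bitmask */
		unsigned int wq_sizes;		/* supported WQE sizes, bitmask */
	};

	#define PAGE_16K_BIT	0x8	/* stand-in for LPFC_CQ/WQ_16K_PAGE_SZ */
	#define WQE_128_BIT	0x2	/* stand-in for LPFC_WQ_SZ128_SUPPORT  */

	static int use_expanded_wqcq_pages(const struct sli4_caps *c)
	{
		return (c->cq_page_sizes & PAGE_16K_BIT) &&
		       (c->wq_page_sizes & PAGE_16K_BIT) &&
		       (c->wq_sizes & WQE_128_BIT);
	}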
/*
* Check if the SLI port supports MDS Diagnostics
*/
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
- /*
- * Bring down the SLI Layer. This step disables all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA FCoE function.
- */
- lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
/* Perform ndlp cleanup on the physical port. The nvme and nvmet
* localports are destroyed after to cleanup all transport memory.
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->enab_exp_wqcq_pages)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
+ if (phba->enab_exp_wqcq_pages) {
+ wqesize = (phba->fcp_embed_io) ?
LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ wqesize,
+ LPFC_WQE_EXP_COUNT);
+ } else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;