atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
- spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
tot = phba->sli4_hba.nvmet_xri_cnt -
- phba->sli4_hba.nvmet_ctx_cnt;
- spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
len += snprintf(buf + len, PAGE_SIZE - len,
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
"CTX Outstanding %08llx\n",
- phba->sli4_hba.nvmet_ctx_cnt,
+ phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
phba->sli4_hba.nvmet_io_wait_total,
tot);
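
For context, the outstanding-IO figure printed above follows a simple invariant: outstanding = total NVMET XRIs minus the contexts sitting on either free list. A minimal sketch of that accounting, assuming the same field names as the patch (the helper name and uint64_t return type are hypothetical, inferred from the %08llx format):

    /* Hypothetical helper: both locks are held so the two free counts
     * are sampled as one coherent snapshot.
     */
    static uint64_t lpfc_nvmet_outstanding(struct lpfc_hba *phba)
    {
            uint64_t tot;

            spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
            spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
            tot = phba->sli4_hba.nvmet_xri_cnt -
                  (phba->sli4_hba.nvmet_ctx_get_cnt +
                   phba->sli4_hba.nvmet_ctx_put_cnt);
            spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
            spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
            return tot;
    }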
/* Check outstanding IO count */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
if (phba->nvmet_support) {
- spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
tot = phba->sli4_hba.nvmet_xri_cnt -
- phba->sli4_hba.nvmet_ctx_cnt;
- spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
} else {
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(
/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
/* nvmet xri-sgl expanded */
xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
/* Fast-path XRI aborted CQ Event work queue list */
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
- spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
/*
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- phba->sli4_hba.nvmet_ctx_cnt++;
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_put_cnt++;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}
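
The hunk above is the producer half of the split pool: a completed IO returns its context buffer to the put list only, so completions never contend with the receive path's get lock. Condensed to its essentials (a restatement of the lines above, not a new driver function):

    /* Put side: only nvmet_ctx_put_lock is taken, leaving the
     * get lock free for the hot receive path.
     */
    spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
    list_add_tail(&ctx_buf->list, &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
    phba->sli4_hba.nvmet_ctx_put_cnt++;
    spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);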
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;
- list_for_each_entry_safe(
- ctx_buf, next_ctx_buf,
- &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
- spin_lock_irqsave(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
- spin_unlock_irqrestore(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
- spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
- spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
- flags);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_del_init(&ctx_buf->list);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ __lpfc_clear_active_sglq(phba,
+ ctx_buf->sglq->sli4_lxritag);
+ ctx_buf->sglq->state = SGL_FREED;
+ ctx_buf->sglq->ndlp = NULL;
+
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_add_tail(&ctx_buf->sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+
+ lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+ kfree(ctx_buf->context);
+ }
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}
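
Cleanup is the one place both free lists are drained at once, and it establishes the lock hierarchy for the whole file: nvmet_ctx_get_lock is always taken before nvmet_ctx_put_lock. Because the outer spin_lock_irqsave() already disabled interrupts, the nested acquisitions use the plain variants; an _irq unlock inside this region would re-enable interrupts while both list locks are still held, inviting a deadlock against the interrupt-context receive path, which takes the get lock with irqsave. The pattern, reduced to a sketch:

    spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags); /* outer: disables IRQs */
    spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);                /* inner: plain lock */
    /* ... drain both free lists ... */
    spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
    spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);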
static int
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
- spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
}
- phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+ phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}
goto dropit;
}
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
- if (phba->sli4_hba.nvmet_ctx_cnt) {
- list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+ if (phba->sli4_hba.nvmet_ctx_get_cnt) {
+ list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
- phba->sli4_hba.nvmet_ctx_cnt--;
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ if (phba->sli4_hba.nvmet_ctx_put_cnt) {
+ list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_get_cnt =
+ phba->sli4_hba.nvmet_ctx_put_cnt;
+ phba->sli4_hba.nvmet_ctx_put_cnt = 0;
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+
+ list_remove_head(
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+ ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ }
}
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
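
This receive-path hunk is the consumer half of a classic two-list free pool: the hot path pops from the get list under one lock, and only when that list runs dry does it briefly take the put lock to splice the entire put list across in O(1). A self-contained sketch of the technique, assuming the patch's field names (the function name is hypothetical, and generic list helpers stand in for the driver's list_remove_head macro):

    /* Hypothetical consumer mirroring the splice-on-empty refill above. */
    static struct lpfc_nvmet_ctxbuf *
    lpfc_nvmet_ctx_get(struct lpfc_hba *phba)
    {
            struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
            unsigned long iflag;

            spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
            if (!phba->sli4_hba.nvmet_ctx_get_cnt) {
                    /* Get list empty: steal everything the completion
                     * path has parked on the put list, in one splice.
                     */
                    spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
                    list_splice_init(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
                                     &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
                    phba->sli4_hba.nvmet_ctx_get_cnt =
                            phba->sli4_hba.nvmet_ctx_put_cnt;
                    phba->sli4_hba.nvmet_ctx_put_cnt = 0;
                    spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
            }
            if (phba->sli4_hba.nvmet_ctx_get_cnt) {
                    ctx_buf = list_first_entry(
                            &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
                            struct lpfc_nvmet_ctxbuf, list);
                    list_del_init(&ctx_buf->list);
                    phba->sli4_hba.nvmet_ctx_get_cnt--;
            }
            spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
            return ctx_buf;
    }

In the common case only nvmet_ctx_get_lock is touched, so the receive and completion paths stop serializing on a single pool lock.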
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
- uint16_t nvmet_ctx_cnt;
+ uint16_t nvmet_ctx_get_cnt;
+ uint16_t nvmet_ctx_put_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
- struct list_head lpfc_nvmet_ctx_list;
+ struct list_head lpfc_nvmet_ctx_get_list;
+ struct list_head lpfc_nvmet_ctx_put_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVMe IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
- spinlock_t nvmet_io_lock;
+ spinlock_t nvmet_ctx_get_lock; /* free ctx list popped by RX path */
+ spinlock_t nvmet_ctx_put_lock; /* free ctx list refilled on cmpl */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
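
Read together, the header additions define the split pool in one place; an annotated subset of the struct lpfc_sli4_hba fields involved (the comments are interpretation, not source):

    struct list_head lpfc_nvmet_ctx_get_list; /* RX path pops free ctxs here */
    struct list_head lpfc_nvmet_ctx_put_list; /* cmpl path pushes them back here */
    uint16_t nvmet_ctx_get_cnt;               /* entries on the get list */
    uint16_t nvmet_ctx_put_cnt;               /* entries on the put list */
    spinlock_t nvmet_ctx_get_lock;            /* protects get list and count */
    spinlock_t nvmet_ctx_put_lock;            /* protects put list and count */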