nvme: don't pass the full CQE to nvme_complete_async_event
author    Christoph Hellwig <hch@lst.de>
          Thu, 10 Nov 2016 15:32:34 +0000 (07:32 -0800)
committer Jens Axboe <axboe@fb.com>
          Thu, 10 Nov 2016 17:06:26 +0000 (10:06 -0700)
We only need the status and result fields, and passing them explicitly
makes life a lot easier for the Fibre Channel transport, which doesn't
have a full CQE for the fast path case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
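
For illustration only (not part of this commit): with the new signature, a transport whose fast path carries just the little-endian completion status and the result value can report an AER directly, without first assembling a full struct nvme_completion. A minimal sketch, assuming a hypothetical caller named example_transport_handle_aen:

/* Hypothetical caller, for illustration only -- not part of this commit. */
static void example_transport_handle_aen(struct nvme_ctrl *ctrl,
		__le16 status, union nvme_result *res)
{
	/*
	 * status stays little-endian and res is passed through untouched;
	 * the byte swapping and status-code check now happen inside
	 * nvme_complete_async_event() itself.
	 */
	nvme_complete_async_event(ctrl, status, res);
}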
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2fd632bcd97501b3f1076bb3dfac31c0567e7203..53584d21c805222a5466a5777d44846e9abae8cd 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1895,18 +1895,25 @@ static void nvme_async_event_work(struct work_struct *work)
        spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-               struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+               union nvme_result *res)
 {
-       u16 status = le16_to_cpu(cqe->status) >> 1;
-       u32 result = le32_to_cpu(cqe->result.u32);
+       u32 result = le32_to_cpu(res->u32);
+       bool done = true;
 
-       if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+       switch (le16_to_cpu(status) >> 1) {
+       case NVME_SC_SUCCESS:
+               done = false;
+               /*FALLTHRU*/
+       case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
                schedule_work(&ctrl->async_event_work);
+               break;
+       default:
+               break;
        }
 
-       if (status != NVME_SC_SUCCESS)
+       if (done)
                return;
 
        switch (result & 0xff07) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5e64957a9b96ecd3808e9c59d96a37df4f9dfab0..468fc445bf3512dd764cba899fe9dc7790505170 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -275,8 +275,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS   1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-               struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+               union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index de8e0505d9797843d5ec60557fab5b5e717aa01b..51d13d5ec7a84b613a351944a045389d30387b1f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -703,7 +703,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                 */
                if (unlikely(nvmeq->qid == 0 &&
                                cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-                       nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+                       nvme_complete_async_event(&nvmeq->dev->ctrl,
+                                       cqe.status, &cqe.result);
                        continue;
                }
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0b8a161cf8810593630d41b1c987b18d141614f5..c4700efc03906dabeccf44a3cae0060803fcddb0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1168,7 +1168,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
         */
        if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-               nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+               nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+                               &cqe->result);
        else
                ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
        ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 757e21a31128f20f68c01c44929024ddb6bc6687..26aa3a5afb0dbf0b79a13cf606930af9df49479d 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -127,7 +127,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
         */
        if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-               nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+               nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+                               &cqe->result);
        } else {
                struct request *rq = blk_mq_rq_from_pdu(iod);