nvme: remove the GENHD_FL_UP check in nvme_ns_remove
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 80c656dcbbacfebf8c68c1840bd42cd3c94ee257..dbe7144f0026cf9e73709555863fc781e4f27710 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -609,6 +609,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
 
 static inline void nvme_clear_nvme_request(struct request *req)
 {
+       nvme_req(req)->status = 0;
        nvme_req(req)->retries = 0;
        nvme_req(req)->flags = 0;
        req->rq_flags |= RQF_DONTPREP;
@@ -631,6 +632,8 @@ static inline void nvme_init_request(struct request *req,
        cmd->common.flags &= ~NVME_CMD_SGL_ALL;
 
        req->cmd_flags |= REQ_FAILFAST_DRIVER;
+       if (req->mq_hctx->type == HCTX_TYPE_POLL)
+               req->cmd_flags |= REQ_HIPRI;
        nvme_clear_nvme_request(req);
        memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
@@ -897,7 +900,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
                cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
        cmnd->write_zeroes.length =
                cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-       cmnd->write_zeroes.control = 0;
+       if (nvme_ns_has_pi(ns))
+               cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
+       else
+               cmnd->write_zeroes.control = 0;
        return BLK_STS_OK;
 }
 
@@ -1029,29 +1035,23 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 
-static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       rq->end_io_data = NULL;
-       complete(waiting);
-}
-
-static void nvme_execute_rq_polled(struct request_queue *q,
-               struct gendisk *bd_disk, struct request *rq, int at_head)
+/*
+ * Return values:
+ * 0:  success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
+               bool at_head)
 {
-       DECLARE_COMPLETION_ONSTACK(wait);
-
-       WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
-
-       rq->cmd_flags |= REQ_HIPRI;
-       rq->end_io_data = &wait;
-       blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
+       blk_status_t status;
 
-       while (!completion_done(&wait)) {
-               blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
-               cond_resched();
-       }
+       status = blk_execute_rq(disk, rq, at_head);
+       if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+               return -EINTR;
+       if (nvme_req(rq)->status)
+               return nvme_req(rq)->status;
+       return blk_status_to_errno(status);
 }
 
 /*
@@ -1061,7 +1061,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
-               blk_mq_req_flags_t flags, bool poll)
+               blk_mq_req_flags_t flags)
 {
        struct request *req;
        int ret;
@@ -1082,16 +1082,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
-       if (poll)
-               nvme_execute_rq_polled(req->q, NULL, req, at_head);
-       else
-               blk_execute_rq(NULL, req, at_head);
-       if (result)
+       ret = nvme_execute_rq(NULL, req, at_head);
+       if (result && ret >= 0)
                *result = nvme_req(req)->result;
-       if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-               ret = -EINTR;
-       else
-               ret = nvme_req(req)->status;
  out:
        blk_mq_free_request(req);
        return ret;
@@ -1102,7 +1095,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
 {
        return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
-                       NVME_QID_ANY, 0, 0, false);
+                       NVME_QID_ANY, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1179,18 +1172,21 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
        }
 }
 
-void nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq)
 {
        struct nvme_command *cmd = nvme_req(rq)->cmd;
        struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
        struct nvme_ns *ns = rq->q->queuedata;
        struct gendisk *disk = ns ? ns->disk : NULL;
        u32 effects;
+       int  ret;
 
        effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-       blk_execute_rq(disk, rq, 0);
+       ret = nvme_execute_rq(disk, rq, false);
        if (effects) /* nothing to be done for zero cmd effects */
                nvme_passthru_end(ctrl, effects);
+
+       return ret;
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 
@@ -1465,7 +1461,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
        c.features.dword11 = cpu_to_le32(dword11);
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-                       buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+                       buffer, buflen, 0, NVME_QID_ANY, 0, 0);
        if (ret >= 0 && result)
                *result = le32_to_cpu(res.u32);
        return ret;
@@ -1894,7 +1890,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
-               blk_queue_update_readahead(ns->head->disk->queue);
+               disk_update_readahead(ns->head->disk);
                blk_mq_unfreeze_queue(ns->head->disk->queue);
        }
        return 0;
@@ -2047,7 +2043,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
        cmd.common.cdw11 = cpu_to_le32(len);
 
        return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
-                       NVME_QID_ANY, 1, 0, false);
+                       NVME_QID_ANY, 1, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_sec_submit);
 #endif /* CONFIG_BLK_SED_OPAL */
@@ -3814,6 +3810,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
+       bool last_path = false;
+
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -3822,28 +3820,32 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        mutex_lock(&ns->ctrl->subsys->lock);
        list_del_rcu(&ns->siblings);
-       if (list_empty(&ns->head->list))
-               list_del_init(&ns->head->entry);
        mutex_unlock(&ns->ctrl->subsys->lock);
 
        synchronize_rcu(); /* guarantee not available in head->list */
        nvme_mpath_clear_current_path(ns);
        synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
 
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (!nvme_ns_head_multipath(ns->head))
-                       nvme_cdev_del(&ns->cdev, &ns->cdev_device);
-               del_gendisk(ns->disk);
-               blk_cleanup_queue(ns->queue);
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
-       }
+       if (!nvme_ns_head_multipath(ns->head))
+               nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+       del_gendisk(ns->disk);
+       blk_cleanup_queue(ns->queue);
+       if (blk_get_integrity(ns->disk))
+               blk_integrity_unregister(ns->disk);
 
        down_write(&ns->ctrl->namespaces_rwsem);
        list_del_init(&ns->list);
        up_write(&ns->ctrl->namespaces_rwsem);
 
-       nvme_mpath_check_last_path(ns);
+       /* Synchronize with nvme_init_ns_head() */
+       mutex_lock(&ns->head->subsys->lock);
+       if (list_empty(&ns->head->list)) {
+               list_del_init(&ns->head->entry);
+               last_path = true;
+       }
+       mutex_unlock(&ns->head->subsys->lock);
+       if (last_path)
+               nvme_mpath_shutdown_disk(ns->head);
        nvme_put_ns(ns);
 }