git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
block: change request end_io handler to pass back a return value
author Jens Axboe <axboe@kernel.dk>
Wed, 21 Sep 2022 21:19:54 +0000 (15:19 -0600)
committer Jens Axboe <axboe@kernel.dk>
Fri, 30 Sep 2022 13:49:09 +0000 (07:49 -0600)
Everything is just converted to returning RQ_END_IO_NONE, and there
should be no functional changes with this patch.

In preparation for allowing the end_io handler to pass ownership back
to the block layer, rather than retain ownership of the request.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
13 files changed:
block/blk-flush.c
block/blk-mq.c
drivers/md/dm-rq.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/nvme/target/passthru.c
drivers/scsi/scsi_error.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/target/target_core_pscsi.c
drivers/ufs/core/ufshpb.c
include/linux/blk-mq.h

index 27705fc584a0d59542a2dc69a3f42d9bacf358cb..53202eff545efb7097e7ae8316541edf88e18b0e 100644 (file)
@@ -217,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
        blk_kick_flush(q, fq, cmd_flags);
 }
 
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+                                      blk_status_t error)
 {
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
@@ -231,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        if (!req_ref_put_and_test(flush_rq)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
-               return;
+               return RQ_END_IO_NONE;
        }
 
        blk_account_io_flush(flush_rq);
@@ -268,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        }
 
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+       return RQ_END_IO_NONE;
 }
 
 bool is_flush_rq(struct request *rq)
@@ -353,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        blk_flush_queue_rq(flush_rq, false);
 }
 
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+                                              blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -375,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
        blk_mq_sched_restart(hctx);
+       return RQ_END_IO_NONE;
 }
 
 /**
index b32f70f38c6e264a58bce72fd6f2fd7d85b71a0a..a21631de45b3e80305f3c066f816991e20e5c3b2 100644 (file)
@@ -1001,7 +1001,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
-               rq->end_io(rq, error);
+               if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+                       blk_mq_free_request(rq);
        } else {
                blk_mq_free_request(rq);
        }
@@ -1295,12 +1296,13 @@ struct blk_rq_wait {
        blk_status_t ret;
 };
 
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
 {
        struct blk_rq_wait *wait = rq->end_io_data;
 
        wait->ret = ret;
        complete(&wait->done);
+       return RQ_END_IO_NONE;
 }
 
 bool blk_rq_is_poll(struct request *rq)
@@ -1534,10 +1536,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-       if (is_flush_rq(rq))
-               rq->end_io(rq, 0);
-       else if (req_ref_put_and_test(rq))
+       if (is_flush_rq(rq)) {
+               if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+                       blk_mq_free_request(rq);
+       } else if (req_ref_put_and_test(rq)) {
                __blk_mq_free_request(rq);
+       }
 }
 
 static bool blk_mq_check_expired(struct request *rq, void *priv)
index 4f49bbcce4f1a34f2db215ac05c83c12a816e555..3001b10a3fbfba7a11eab783ab63b825f3f9cbd9 100644 (file)
@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
        dm_complete_request(rq, error);
 }
 
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+                                           blk_status_t error)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
        dm_complete_request(tio->orig, error);
+       return RQ_END_IO_NONE;
 }
 
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
index 0f05b61a30fea6fc6e5eab178cca664224c06d8e..965a4c3e9d44c5d39679a45332dc237d3b4a29bb 100644 (file)
@@ -1172,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
        queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+                                                blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
        unsigned long flags;
@@ -1184,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
                dev_err(ctrl->device,
                        "failed nvme_keep_alive_end_io error=%d\n",
                                status);
-               return;
+               return RQ_END_IO_NONE;
        }
 
        ctrl->comp_seen = false;
@@ -1195,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (startka)
                nvme_queue_keep_alive_work(ctrl);
+       return RQ_END_IO_NONE;
 }
 
 static void nvme_keep_alive_work(struct work_struct *work)
index 357791ff0623880a340c8b4908f30b39cc199b28..2995789d5f9db61843f4506ad9a956a6274c8f37 100644 (file)
@@ -392,7 +392,8 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
        io_uring_cmd_done(ioucmd, status, result);
 }
 
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+                                               blk_status_t err)
 {
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
@@ -411,6 +412,8 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
                nvme_uring_task_cb(ioucmd);
        else
                io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+       return RQ_END_IO_NONE;
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
index f9af99b7e6724769a087c87d798a641cb9403650..7bbffd2a9beb9e92551a0ee3bb095aee91b1cebc 100644 (file)
@@ -1268,7 +1268,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
@@ -1276,6 +1276,7 @@ static void abort_endio(struct request *req, blk_status_t error)
                 "Abort status: 0x%x", nvme_req(req)->status);
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
        blk_mq_free_request(req);
+       return RQ_END_IO_NONE;
 }
 
 static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -2447,22 +2448,25 @@ out_unlock:
        return result;
 }
 
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+                                            blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
        blk_mq_free_request(req);
        complete(&nvmeq->delete_done);
+       return RQ_END_IO_NONE;
 }
 
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+                                         blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
        if (error)
                set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
 
-       nvme_del_queue_end(req, error);
+       return nvme_del_queue_end(req, error);
 }
 
 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
index 94d3153bae54d092f325b2c2114486a9e1bc3071..79af5140af8bfe5cd10c0a3a7bc32fdaeae631ca 100644 (file)
@@ -245,14 +245,15 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                nvme_passthru_end(ctrl, effects, req->cmd, status);
 }
 
-static void nvmet_passthru_req_done(struct request *rq,
-                                   blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+                                                 blk_status_t blk_status)
 {
        struct nvmet_req *req = rq->end_io_data;
 
        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, nvme_req(rq)->status);
        blk_mq_free_request(rq);
+       return RQ_END_IO_NONE;
 }
 
 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
index 448748e3fba5e156243d6f0e54276739ac734c82..786fb963cf3fc96695112c47299765ec6d298392 100644 (file)
@@ -2004,9 +2004,11 @@ maybe_retry:
        }
 }
 
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+                                           blk_status_t status)
 {
        blk_mq_free_request(req);
+       return RQ_END_IO_NONE;
 }
 
 /**
index 340b050ad28d15dc61ae4d5864e6d1f36e32d083..94c5e9a9309c8a7958e4d93d10ea9de8de80bddb 100644 (file)
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1311,7 +1311,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * This function is a "bottom half" handler that is called by the mid
  * level when a command is completed (or has failed).
  */
-static void
+static enum rq_end_io_ret
 sg_rq_end_io(struct request *rq, blk_status_t status)
 {
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -1324,11 +1324,11 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
        int result, resid, done = 1;
 
        if (WARN_ON(srp->done != 0))
-               return;
+               return RQ_END_IO_NONE;
 
        sfp = srp->parentfp;
        if (WARN_ON(sfp == NULL))
-               return;
+               return RQ_END_IO_NONE;
 
        sdp = sfp->parentdp;
        if (unlikely(atomic_read(&sdp->detaching)))
@@ -1406,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
                INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
                schedule_work(&srp->ew.work);
        }
+       return RQ_END_IO_NONE;
 }
 
 static const struct file_operations sg_fops = {
index 850172a2b8f14083f80e7a4a4ad40cf8dfebdae6..55e7c07ebe4c57fa6ff84024152891bbaa81d1fd 100644 (file)
@@ -512,7 +512,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
        atomic64_dec(&STp->stats->in_flight);
 }
 
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+                                             blk_status_t status)
 {
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
        struct st_request *SRpnt = req->end_io_data;
@@ -532,6 +533,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
 
        blk_rq_unmap_user(tmp);
        blk_mq_free_request(req);
+       return RQ_END_IO_NONE;
 }
 
 static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
index e6a967ddc08ce6d4de42edb9d9e28bc6bafb7429..8a7306e5e133af30c9e8710ae52a673fce1f4c44 100644 (file)
@@ -39,7 +39,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
 
 /*     pscsi_attach_hba():
  *
@@ -1002,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
        return 0;
 }
 
-static void pscsi_req_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+                                        blk_status_t status)
 {
        struct se_cmd *cmd = req->end_io_data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -1029,6 +1030,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
        }
 
        blk_mq_free_request(req);
+       return RQ_END_IO_NONE;
 }
 
 static const struct target_backend_ops pscsi_ops = {
index a1a7a1175a5a61da3f35d39d93fc196fc896529a..3d69a81c5b1783e48b5b92a0f37f3ab8af912590 100644 (file)
@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
        srgn->srgn_state = HPB_SRGN_VALID;
 }
 
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+                                                  blk_status_t error)
 {
        struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
 
        ufshpb_put_req(umap_req->hpb, umap_req);
+       return RQ_END_IO_NONE;
 }
 
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+                                                 blk_status_t error)
 {
        struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
        struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
        spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
 
        ufshpb_put_map_req(map_req->hpb, map_req);
+       return RQ_END_IO_NONE;
 }
 
 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
index 00a15808c137193ef74c63cbcbcee0f1ed23c784..e6fa49dd61960063b2f495886759969faa77adb1 100644 (file)
@@ -14,7 +14,12 @@ struct blk_flush_queue;
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_DEFAULT_RQ      128
 
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+enum rq_end_io_ret {
+       RQ_END_IO_NONE,
+       RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
 
 /*
  * request flags */