--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ ... @@
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
+ blk_qc_t unused;
+
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ ... @@
* bypass a potential scheduler on the bottom device for
* insert.
*/
- return blk_mq_request_issue_directly(rq, true);
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ ... @@
return ret;
}
-static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass, bool last)
@@ ... @@
return ret;
}
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
-{
- blk_qc_t unused;
-
- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
-}
-
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ ... @@
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie,
+ bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);