 	 * hctx_lock() covers both issue and completion paths.
 	 */
 	hctx_lock(hctx, &srcu_idx);
-	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
 		__blk_mq_complete_request(rq);
 	hctx_unlock(hctx, srcu_idx);
 }
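
The hunk above is the completion side of the generation-number scheme: a normal
completion is allowed only while the timeout machinery has not recorded the
current generation in ->aborted_gstate, so the old blk_mark_rq_complete()
handshake can go away. Below is a minimal user-space model of that ownership
rule (illustrative only; "toy_rq" and its helpers are made up, and the model
ignores the SRCU/seqcount ordering that makes the real check safe):

#include <stdio.h>
#include <stdbool.h>

/* toy request: just the two fields the checks in this patch compare */
struct toy_rq {
	unsigned long gstate;		/* bumped each time the request is (re)issued */
	unsigned long aborted_gstate;	/* generation the timeout path has claimed */
};

/* completion path: proceed only if the timeout path has not claimed the
 * generation currently in flight (mirrors the "!=" test above) */
static bool toy_complete_allowed(const struct toy_rq *rq)
{
	return rq->aborted_gstate != rq->gstate;
}

/* timeout path, first pass: record the generation being timed out */
static void toy_mark_aborted(struct toy_rq *rq)
{
	rq->aborted_gstate = rq->gstate;
}

/* timeout path, second pass (after the grace period hctx_lock() provides):
 * the request is still ours only if it was not completed and re-issued */
static bool toy_timeout_owns(const struct toy_rq *rq)
{
	return rq->aborted_gstate == rq->gstate;
}

int main(void)
{
	struct toy_rq rq = { .gstate = 2, .aborted_gstate = 0 };

	/* timeout path claims generation 2; completions now yield */
	toy_mark_aborted(&rq);
	printf("completion allowed: %d\n", toy_complete_allowed(&rq));	/* 0 */
	printf("timeout owns rq:    %d\n", toy_timeout_owns(&rq));	/* 1 */

	/* a completion won the race instead and the request was re-issued,
	 * bumping the generation: the timeout's claim is now stale */
	rq.gstate++;
	printf("timeout owns rq:    %d\n", toy_timeout_owns(&rq));	/* 0 */
	return 0;
}
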
 	preempt_enable();
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
-		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
 	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
 		return;
+	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
+
 	if (ops->timeout)
 		ret = ops->timeout(req, reserved);
 		 */
 		blk_mq_rq_update_aborted_gstate(req, 0);
 		blk_add_timer(req);
-		blk_clear_rq_complete(req);
 		break;
 	case BLK_EH_NOT_HANDLED:
 		break;
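
The BLK_EH_* values handled in this switch come from the driver's blk_mq_ops
->timeout callback, which blk-mq invokes right after setting
RQF_MQ_TIMEOUT_EXPIRED in the earlier hunk. A hypothetical callback is sketched
below; everything named "mydrv" is invented for illustration, while struct
request, blk_mq_rq_to_pdu() and the BLK_EH_* return values are existing kernel
interfaces of this era:

#include <linux/blk-mq.h>

/* hypothetical per-request driver data, allocated by blk-mq as the pdu */
struct mydrv_cmd {
	bool still_in_flight;
};

static enum blk_eh_timer_return mydrv_timeout(struct request *rq, bool reserved)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (cmd->still_in_flight) {
		/* hardware is just slow: ask blk-mq to re-arm the timer,
		 * which resets ->aborted_gstate and calls blk_add_timer() */
		return BLK_EH_RESET_TIMER;
	}

	/* the command is done from the driver's point of view */
	return BLK_EH_HANDLED;
}

Returning BLK_EH_NOT_HANDLED instead hits the empty case above: blk-mq does
nothing further with the request.
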
 	might_sleep();
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if ((rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) ||
+	    !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
 	/* read coherent snapshots of @rq->gstate and @rq->deadline */
 	 * now guaranteed to see @rq->aborted_gstate and yield. If
 	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
 	 */
-	if (READ_ONCE(rq->gstate) == rq->aborted_gstate &&
-	    !blk_mark_rq_complete(rq))
+	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
 		blk_mq_rq_timed_out(rq, reserved);
 }
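
Both this check and the one guarding the earlier might_sleep() hunk use
RQF_MQ_TIMEOUT_EXPIRED for the job the removed blk_mark_rq_complete() calls
used to do: making sure the timeout for a given request instance fires at most
once. A tiny stand-alone model of that guard follows (illustrative only; the
names are invented, and the patch itself sets the flag just before calling
ops->timeout rather than inside a wrapper like this):

#include <stdio.h>

#define TOY_TIMEOUT_EXPIRED	(1u << 0)	/* stands in for RQF_MQ_TIMEOUT_EXPIRED */

struct toy_rq_flags {
	unsigned int rq_flags;
};

/* run the timeout handler at most once per request instance */
static void toy_maybe_time_out(struct toy_rq_flags *rq)
{
	if (rq->rq_flags & TOY_TIMEOUT_EXPIRED)
		return;				/* already fired, skip */
	rq->rq_flags |= TOY_TIMEOUT_EXPIRED;
	printf("timeout fired\n");
}

int main(void)
{
	struct toy_rq_flags rq = { .rq_flags = 0 };

	toy_maybe_time_out(&rq);	/* prints "timeout fired" */
	toy_maybe_time_out(&rq);	/* silent: the flag suppresses a second firing */
	return 0;
}
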
 #define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
 /* The per-zone write lock is held for this request */
 #define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
+/* timeout is expired */
+#define RQF_MQ_TIMEOUT_EXPIRED	((__force req_flags_t)(1 << 20))
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \