BugLink: https://bugs.launchpad.net/bugs/1848739
This status is returned from a driver to the block layer when a device-related
resource is unavailable, but the driver can guarantee that IO dispatch will be
triggered again in the future once the resource becomes available.
Convert some drivers to return BLK_STS_DEV_RESOURCE. Also, if a driver
returns BLK_STS_RESOURCE and SCHED_RESTART is set, rerun the queue after
a delay (BLK_MQ_RESOURCE_DELAY) to avoid IO stalls. BLK_MQ_RESOURCE_DELAY is
3 ms because both scsi-mq and nvmefc already use that magic value.
If a driver can make sure there is in-flight IO, it is safe to return
BLK_STS_DEV_RESOURCE because:
1) If all in-flight IOs complete before examining SCHED_RESTART in
blk_mq_dispatch_rq_list(), SCHED_RESTART must be cleared, so the queue
is run immediately in this case by blk_mq_dispatch_rq_list();
2) if there is any in-flight IO after/when examining SCHED_RESTART
in blk_mq_dispatch_rq_list():
- if SCHED_RESTART isn't set, the queue is run immediately, as handled in 1)
- otherwise, this request will be dispatched after any in-flight IO is
completed via blk_mq_sched_restart()
3) if SCHED_RESTART is set concurrently in another context because of
BLK_STS_RESOURCE, blk_mq_delay_run_hw_queue() covers the above two
cases and makes sure that an IO hang is avoided.
One invariant is that the queue will be rerun if SCHED_RESTART is set.
Suggested-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 86ff7c2a80cd357f6156a53b354f6a0b357dc0c9)
[marcelo.cerri@canonical.com: fixed context in include/linux/blk_types.h;
the missing context comes from commit 9111e5686c8c ("block: Provide
blk_status_t decoding for path errors"), which is not needed here]
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
+ [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
/* device mapper special case, should not leak out: */
}
}
+#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
+
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bool got_budget)
{
struct request *rq, *nxt;
bool no_tag = false;
int errors, queued;
+ blk_status_t ret = BLK_STS_OK;
if (list_empty(list))
return false;
errors = queued = 0;
do {
struct blk_mq_queue_data bd;
- blk_status_t ret;
rq = list_first_entry(list, struct request, queuelist);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
- if (ret == BLK_STS_RESOURCE) {
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
/*
* If an I/O scheduler has been configured and we got a
* driver tag for the next request already, free it
* that is where we will continue on next queue run.
*/
if (!list_empty(list)) {
+ bool needs_restart;
+
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
* - Some but not all block drivers stop a queue before
* returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
* and dm-rq.
+ *
+ * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
+ * bit is set, run queue after a delay to avoid IO stalls
+ * that could otherwise occur if the queue is idle.
*/
- if (!blk_mq_sched_needs_restart(hctx) ||
+ needs_restart = blk_mq_sched_needs_restart(hctx);
+ if (!needs_restart ||
(no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
blk_mq_run_hw_queue(hctx, true);
+ else if (needs_restart && (ret == BLK_STS_RESOURCE))
+ blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
}
return (queued + errors) != 0;
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
__blk_mq_requeue_request(rq);
break;
default:
hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- if (ret == BLK_STS_RESOURCE)
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_sched_insert_request(rq, false, true, false);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
return BLK_STS_OK;
} else
/* requeue request */
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
}
}
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
return BLK_STS_IOERR;
}
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
}
static void blkif_complete_rq(struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
return r;
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE) {
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
- blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
return BLK_STS_RESOURCE;
}
NVME_FC_Q_LIVE,
};
-#define NVMEFC_QUEUE_DELAY 3 /* ms units */
-
#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
struct nvme_fc_queue {
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- goto busy;
+ return BLK_STS_RESOURCE;
if (!nvme_fc_ctrl_get(ctrl))
return BLK_STS_IOERR;
ret != -EBUSY)
return BLK_STS_IOERR;
- goto busy;
+ return BLK_STS_RESOURCE;
}
return BLK_STS_OK;
-
-busy:
- if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
- blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
- return BLK_STS_RESOURCE;
}
static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- if (atomic_read(&sdev->device_busy) == 0 &&
- !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+ if (atomic_read(&sdev->device_busy) ||
+ scsi_device_blocked(sdev))
+ ret = BLK_STS_DEV_RESOURCE;
break;
default:
if (unlikely(!scsi_device_online(sdev)))
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails allocating these
+ * resources, we know that inflight (or pending) IO will free these
+ * resources upon completion.
+ *
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+
struct blk_issue_stat {
u64 stat;
};