#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
+#include <linux/ratelimit.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
mempool_t *pool;
};
-#define SP(x) { x, "sgpool-" __stringify(x) }
+#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
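
For context, SP() now names the size member explicitly while the pool name is still built by pasting the segment count with __stringify(). A minimal userspace sketch of the same designated-initializer/stringify pattern (struct layout and pool sizes here are illustrative, not the kernel's):

#include <stdio.h>

/* userspace stand-ins for the kernel's __stringify() */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

struct sg_pool {
	size_t		size;
	const char	*name;
};

/* same shape as the patched SP(): designated .size, positional name */
#define SP(x) { .size = x, "sgpool-" __stringify(x) }

static struct sg_pool pools[] = {
	SP(8), SP(16), SP(32), SP(64), SP(128),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
		printf("%-12s %zu segments\n", pools[i].name, pools[i].size);
	return 0;
}
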
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
- if (!req)
+ if (IS_ERR(req))
return ret;
blk_rq_set_block_pc(req);
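
blk_get_request() reports a dead queue with an ERR_PTR() rather than NULL, which is why the plain !req test had to go. A small userspace sketch of the error-pointer convention the new check relies on (the helper names mirror the kernel's; get_request() is made up for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

/* negative errno values map into the top page of the address space */
static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical stand-in for blk_get_request() on a dying queue */
static void *get_request(int queue_dead)
{
	return queue_dead ? ERR_PTR(-ENODEV) : (void *)"req";
}

int main(void)
{
	void *req = get_request(1);

	if (IS_ERR(req))	/* a plain !req test would miss this */
		printf("error: %ld\n", PTR_ERR(req));
	return 0;
}
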
if (req->mq_ctx) {
/*
- * In the MQ case the command gets freed by __blk_mq_end_io,
+ * In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
* We also can't kick the queues from irq context, so we
* will have to defer it to a workqueue.
*/
scsi_mq_uninit_cmd(cmd);
- __blk_mq_end_io(req, error);
+ __blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
    !list_empty(&sdev->host->starved_list))
	kblockd_schedule_work(&sdev->requeue_work);
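
The workqueue deferral mentioned in the comment lands in the device's requeue_work item; from context it is a thin wrapper around scsi_run_queue(), roughly as below (a sketch for orientation, not part of this patch):

static void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev =
		container_of(work, struct scsi_device, requeue_work);

	/* safe to kick the queue here: we are in process context */
	scsi_run_queue(sdev->request_queue);
}
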
struct request *req = cmd->request;
int error = 0;
struct scsi_sense_hdr sshdr;
- int sense_valid = 0;
- int sense_deferred = 0;
+ bool sense_valid = false;
+ int sense_deferred = 0, level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
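/*
 * asc 0x00/ascq 0x1d is ATA PASS-THROUGH INFORMATION AVAILABLE:
 * skip the sense print since the caller wants the ATA registers
 */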
if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
;
else if (!(req->cmd_flags & REQ_QUIET))
- scsi_print_sense("", cmd);
+ scsi_print_sense(cmd);
result = 0;
/* BLOCK_PC may have set error */
error = 0;
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
if (!(req->cmd_flags & REQ_QUIET)) {
- scsi_print_result(cmd);
- if (driver_byte(result) & DRIVER_SENSE)
- scsi_print_sense("", cmd);
- scsi_print_command(cmd);
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (unlikely(scsi_logging_level))
+ level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
+ SCSI_LOG_MLCOMPLETE_BITS);
+
+ /*
+ * if logging is enabled the failure will be printed
+ * in scsi_log_completion(), so avoid duplicate messages
+ */
+ if (!level && __ratelimit(&_rs)) {
+ scsi_print_result(cmd, NULL, FAILED);
+ if (driver_byte(result) & DRIVER_SENSE)
+ scsi_print_sense(cmd);
+ scsi_print_command(cmd);
+ }
}
if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
return;
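
DEFINE_RATELIMIT_STATE() caps the failure printing at DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL (10 messages per 5 seconds by default). A rough userspace sketch of the same interval/burst policy, omitting the kernel's accounting of suppressed messages:

#include <stdio.h>
#include <time.h>

#define RATELIMIT_INTERVAL	5	/* seconds; kernel default is 5*HZ */
#define RATELIMIT_BURST		10

struct ratelimit_state {
	time_t	begin;
	int	printed;
};

/* returns nonzero if the caller may print, mirroring __ratelimit() */
static int ratelimit(struct ratelimit_state *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= RATELIMIT_INTERVAL) {
		rs->begin = now;	/* new interval, reset the budget */
		rs->printed = 0;
	}
	if (rs->printed >= RATELIMIT_BURST)
		return 0;		/* budget exhausted, suppress */
	rs->printed++;
	return 1;
}

int main(void)
{
	static struct ratelimit_state rs;

	for (int i = 0; i < 25; i++)
		if (ratelimit(&rs))
			printf("I/O error %d\n", i);	/* only 10 make it */
	return 0;
}
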
next_rq->special = bidi_sdb;
}
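+	/* mark the request started; this also arms the block layer timeout */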
+ blk_mq_start_request(req);
+
return scsi_setup_cmnd(sdev, req);
}
blk_mq_complete_request(cmd->request);
}
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
+ bool last)
{
struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
+
if (!(req->cmd_flags & REQ_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
if (ret)
goto out_dec_host_busy;
req->cmd_flags |= REQ_DONTPREP;
+ } else {
+ blk_mq_start_request(req);
}
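+	/*
+	 * Mirror the queue's tagged state into the request so drivers
+	 * that check blk_rq_tagged() can tell tagged from untagged
+	 * commands.
+	 */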
+ if (blk_queue_tagged(q))
+ req->cmd_flags |= REQ_QUEUED;
+ else
+ req->cmd_flags &= ~REQ_QUEUED;
+
scsi_init_cmd_errh(cmd);
cmd->scsi_done = scsi_mq_done;
return ret;
}
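
prep_to_mq(), called above, maps the legacy BLKPREP_* result of the prep path onto blk-mq queueing codes. It is not shown in this hunk, but from context it looks roughly like this (a sketch, not part of this diff):

static int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;			/* dispatch the command */
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;	/* retry later */
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;	/* fail the request */
	}
}
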
+static enum blk_eh_timer_return scsi_timeout(struct request *req,
+ bool reserved)
+{
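+	/*
+	 * Reserved requests are driver-internal and never carry a live
+	 * SCSI command, so there is nothing for scsi_times_out() to
+	 * handle; just restart their timer.
+	 */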
+ if (reserved)
+ return BLK_EH_RESET_TIMER;
+ return scsi_times_out(req);
+}
+
static int scsi_init_request(void *data, struct request *rq,
unsigned int hctx_idx, unsigned int request_idx,
unsigned int numa_node)
.map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
- .timeout = scsi_times_out,
+ .timeout = scsi_timeout,
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
};
memset(&shost->tag_set, 0, sizeof(shost->tag_set));
shost->tag_set.ops = &scsi_mq_ops;
- shost->tag_set.nr_hw_queues = 1;
+ shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
shost->tag_set.queue_depth = shost->can_queue;
shost->tag_set.cmd_size = cmd_size;
shost->tag_set.numa_node = NUMA_NO_NODE;
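
With nr_hw_queues an LLD can now ask for more than one hardware queue; leaving it unset still falls back to 1 via the `?:` above. A hedged sketch of how a driver might request per-CPU queues before scsi_add_host() (my_template and struct my_hba are hypothetical names):

struct Scsi_Host *shost;

shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
if (!shost)
	return -ENOMEM;

/* one hardware queue per online CPU; 0 would fall back to 1 */
shost->nr_hw_queues = num_online_cpus();
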