git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blobdiff - drivers/scsi/scsi_lib.c
block: split scsi_request out of struct request
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / scsi_lib.c
index e9e1e141af9cd287bcca730d05a7a62d58fb644a..8188e5c71f75e0a7396b1326ca9d27e1008f1572 100644 (file)
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+static struct kmem_cache *scsi_sdb_cache;
+static struct kmem_cache *scsi_sense_cache;
+static struct kmem_cache *scsi_sense_isadma_cache;
+static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
-struct kmem_cache *scsi_sdb_cache;
+static inline struct kmem_cache *
+scsi_select_sense_cache(struct Scsi_Host *shost)
+{
+       return shost->unchecked_isa_dma ?
+               scsi_sense_isadma_cache : scsi_sense_cache;
+}
+
+static void scsi_free_sense_buffer(struct Scsi_Host *shost,
+               unsigned char *sense_buffer)
+{
+       kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+}
+
+static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+       gfp_t gfp_mask, int numa_node)
+{
+       return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
+                       numa_node);
+}
+
+int scsi_init_sense_cache(struct Scsi_Host *shost)
+{
+       struct kmem_cache *cache;
+       int ret = 0;
+
+       cache = scsi_select_sense_cache(shost);
+       if (cache)
+               return 0;
+
+       mutex_lock(&scsi_sense_cache_mutex);
+       if (shost->unchecked_isa_dma) {
+               scsi_sense_isadma_cache =
+                       kmem_cache_create("scsi_sense_cache(DMA)",
+                       SCSI_SENSE_BUFFERSIZE, 0,
+                       SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+               if (!scsi_sense_isadma_cache)
+                       ret = -ENOMEM;
+       } else {
+               scsi_sense_cache =
+                       kmem_cache_create("scsi_sense_cache",
+                       SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+               if (!scsi_sense_cache)
+                       ret = -ENOMEM;
+       }
+
+       mutex_unlock(&scsi_sense_cache_mutex);
+       return ret;
+}
 
 /*
  * When to reinvoke queueing after a resource shortage. It's 3 msecs to
@@ -169,21 +220,21 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 {
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
+       struct scsi_request *rq;
        int ret = DRIVER_ERROR << 24;
 
        req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
        if (IS_ERR(req))
                return ret;
-       blk_rq_set_block_pc(req);
+       rq = scsi_req(req);
+       scsi_req_init(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_RECLAIM))
                goto out;
 
-       req->cmd_len = COMMAND_SIZE(cmd[0]);
-       memcpy(req->cmd, cmd, req->cmd_len);
-       req->sense = sense;
-       req->sense_len = 0;
+       rq->cmd_len = COMMAND_SIZE(cmd[0]);
+       memcpy(rq->cmd, cmd, rq->cmd_len);
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_flags |= flags;
@@ -200,11 +251,13 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
-       if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
-               memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+       if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
+               memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
 
        if (resid)
-               *resid = req->resid_len;
+               *resid = rq->resid_len;
+       if (sense && rq->sense_len)
+               memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
        ret = req->errors;
  out:
        blk_put_request(req);
@@ -645,14 +698,13 @@ static bool scsi_end_request(struct request *req, int error,
 
                if (bidi_bytes)
                        scsi_release_bidi_buffers(cmd);
+               scsi_release_buffers(cmd);
+               scsi_put_command(cmd);
 
                spin_lock_irqsave(q->queue_lock, flags);
                blk_finish_request(req, error);
                spin_unlock_irqrestore(q->queue_lock, flags);
 
-               scsi_release_buffers(cmd);
-
-               scsi_put_command(cmd);
                scsi_run_queue(q);
        }
 
@@ -756,16 +808,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 
        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
                if (result) {
-                       if (sense_valid && req->sense) {
+                       if (sense_valid) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
-                               int len = 8 + cmd->sense_buffer[7];
-
-                               if (len > SCSI_SENSE_BUFFERSIZE)
-                                       len = SCSI_SENSE_BUFFERSIZE;
-                               memcpy(req->sense, cmd->sense_buffer,  len);
-                               req->sense_len = len;
+                               scsi_req(req)->sense_len =
+                                       min(8 + cmd->sense_buffer[7],
+                                           SCSI_SENSE_BUFFERSIZE);
                        }
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
@@ -775,14 +824,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                 */
                req->errors = cmd->result;
 
-               req->resid_len = scsi_get_resid(cmd);
+               scsi_req(req)->resid_len = scsi_get_resid(cmd);
 
                if (scsi_bidi_cmnd(cmd)) {
                        /*
                         * Bidi commands Must be complete as a whole,
                         * both sides at once.
                         */
-                       req->next_rq->resid_len = scsi_in(cmd)->resid;
+                       scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
                        if (scsi_end_request(req, 0, blk_rq_bytes(req),
                                        blk_rq_bytes(req->next_rq)))
                                BUG();
@@ -1109,34 +1158,25 @@ err_exit:
 }
 EXPORT_SYMBOL(scsi_init_io);
 
-static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
-               struct request *req)
+void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
-       struct scsi_cmnd *cmd;
-
-       if (!req->special) {
-               /* Bail if we can't get a reference to the device */
-               if (!get_device(&sdev->sdev_gendev))
-                       return NULL;
-
-               cmd = scsi_get_command(sdev, GFP_ATOMIC);
-               if (unlikely(!cmd)) {
-                       put_device(&sdev->sdev_gendev);
-                       return NULL;
-               }
-               req->special = cmd;
-       } else {
-               cmd = req->special;
-       }
+       void *buf = cmd->sense_buffer;
+       void *prot = cmd->prot_sdb;
+       unsigned long flags;
 
-       /* pull a tag out of the request if we have one */
-       cmd->tag = req->tag;
-       cmd->request = req;
+       /* zero out the cmd, except for the embedded scsi_request */
+       memset((char *)cmd + sizeof(cmd->req), 0,
+               sizeof(*cmd) - sizeof(cmd->req));
 
-       cmd->cmnd = req->cmd;
-       cmd->prot_op = SCSI_PROT_NORMAL;
+       cmd->device = dev;
+       cmd->sense_buffer = buf;
+       cmd->prot_sdb = prot;
+       INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+       cmd->jiffies_at_alloc = jiffies;
 
-       return cmd;
+       spin_lock_irqsave(&dev->list_lock, flags);
+       list_add_tail(&cmd->list, &dev->cmd_list);
+       spin_unlock_irqrestore(&dev->list_lock, flags);
 }
 
 static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1159,7 +1199,8 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
        }
 
-       cmd->cmd_len = req->cmd_len;
+       cmd->cmd_len = scsi_req(req)->cmd_len;
+       cmd->cmnd = scsi_req(req)->cmd;
        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
@@ -1179,6 +1220,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
                        return ret;
        }
 
+       cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_cmd_to_driver(cmd)->init_command(cmd);
 }
@@ -1297,19 +1339,28 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
        struct scsi_device *sdev = q->queuedata;
-       struct scsi_cmnd *cmd;
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
        int ret;
 
        ret = scsi_prep_state_check(sdev, req);
        if (ret != BLKPREP_OK)
                goto out;
 
-       cmd = scsi_get_cmd_from_req(sdev, req);
-       if (unlikely(!cmd)) {
-               ret = BLKPREP_DEFER;
-               goto out;
+       if (!req->special) {
+               /* Bail if we can't get a reference to the device */
+               if (unlikely(!get_device(&sdev->sdev_gendev))) {
+                       ret = BLKPREP_DEFER;
+                       goto out;
+               }
+
+               scsi_init_command(sdev, cmd);
+               req->special = cmd;
        }
 
+       cmd->tag = req->tag;
+       cmd->request = req;
+       cmd->prot_op = SCSI_PROT_NORMAL;
+
        ret = scsi_setup_cmnd(sdev, req);
 out:
        return scsi_prep_return(q, req, ret);
@@ -1826,7 +1877,9 @@ static int scsi_mq_prep_fn(struct request *req)
        unsigned char *sense_buf = cmd->sense_buffer;
        struct scatterlist *sg;
 
-       memset(cmd, 0, sizeof(struct scsi_cmnd));
+       /* zero out the cmd, except for the embedded scsi_request */
+       memset((char *)cmd + sizeof(cmd->req), 0,
+               sizeof(*cmd) - sizeof(cmd->req));
 
        req->special = cmd;
 
@@ -1836,7 +1889,6 @@ static int scsi_mq_prep_fn(struct request *req)
 
        cmd->tag = req->tag;
 
-       cmd->cmnd = req->cmd;
        cmd->prot_op = SCSI_PROT_NORMAL;
 
        INIT_LIST_HEAD(&cmd->list);
@@ -1911,7 +1963,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (!scsi_host_queue_ready(q, shost, sdev))
                goto out_dec_target_busy;
 
-
        if (!(req->rq_flags & RQF_DONTPREP)) {
                ret = prep_to_mq(scsi_mq_prep_fn(req));
                if (ret != BLK_MQ_RQ_QUEUE_OK)
@@ -1981,21 +2032,24 @@ static int scsi_init_request(void *data, struct request *rq,
                unsigned int hctx_idx, unsigned int request_idx,
                unsigned int numa_node)
 {
+       struct Scsi_Host *shost = data;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
-       cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
-                       numa_node);
+       cmd->sense_buffer =
+               scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
        if (!cmd->sense_buffer)
                return -ENOMEM;
+       cmd->req.sense = cmd->sense_buffer;
        return 0;
 }
 
 static void scsi_exit_request(void *data, struct request *rq,
                unsigned int hctx_idx, unsigned int request_idx)
 {
+       struct Scsi_Host *shost = data;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
-       kfree(cmd->sense_buffer);
+       scsi_free_sense_buffer(shost, cmd->sense_buffer);
 }
 
 static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2028,7 +2082,7 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
        return bounce_limit;
 }
 
-static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
+void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
        struct device *dev = shost->dma_dev;
 
@@ -2063,28 +2117,64 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
         */
        blk_queue_dma_alignment(q, 0x03);
 }
+EXPORT_SYMBOL_GPL(__scsi_init_queue);
 
-struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
-                                        request_fn_proc *request_fn)
+static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
 {
-       struct request_queue *q;
+       struct Scsi_Host *shost = q->rq_alloc_data;
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
-       q = blk_init_queue(request_fn, NULL);
-       if (!q)
-               return NULL;
-       __scsi_init_queue(shost, q);
-       return q;
+       memset(cmd, 0, sizeof(*cmd));
+
+       cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+       if (!cmd->sense_buffer)
+               goto fail;
+       cmd->req.sense = cmd->sense_buffer;
+
+       if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
+               cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
+               if (!cmd->prot_sdb)
+                       goto fail_free_sense;
+       }
+
+       return 0;
+
+fail_free_sense:
+       scsi_free_sense_buffer(shost, cmd->sense_buffer);
+fail:
+       return -ENOMEM;
+}
+
+static void scsi_exit_rq(struct request_queue *q, struct request *rq)
+{
+       struct Scsi_Host *shost = q->rq_alloc_data;
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+       if (cmd->prot_sdb)
+               kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
+       scsi_free_sense_buffer(shost, cmd->sense_buffer);
 }
-EXPORT_SYMBOL(__scsi_alloc_queue);
 
 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 {
+       struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;
 
-       q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
        if (!q)
                return NULL;
+       q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+       q->rq_alloc_data = shost;
+       q->request_fn = scsi_request_fn;
+       q->init_rq_fn = scsi_init_rq;
+       q->exit_rq_fn = scsi_exit_rq;
+
+       if (blk_init_allocated_queue(q) < 0) {
+               blk_cleanup_queue(q);
+               return NULL;
+       }
 
+       __scsi_init_queue(shost, q);
        blk_queue_prep_rq(q, scsi_prep_fn);
        blk_queue_unprep_rq(q, scsi_unprep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
@@ -2208,6 +2298,8 @@ int __init scsi_init_queue(void)
 
 void scsi_exit_queue(void)
 {
+       kmem_cache_destroy(scsi_sense_cache);
+       kmem_cache_destroy(scsi_sense_isadma_cache);
        kmem_cache_destroy(scsi_sdb_cache);
 }