nvme: move nvme_setup_flush and nvme_setup_rw to common code
drivers/nvme/host/core.c (mirror_ubuntu-zesty-kernel.git)
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "nvme.h"

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, void __user *ubuffer, unsigned bufflen,
                u32 *result, unsigned timeout)
{
        bool write = cmd->common.opcode & 1;
        struct bio *bio = NULL;
        struct request *req;
        int ret;

        req = blk_mq_alloc_request(q, write, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->cmd_type = REQ_TYPE_DRV_PRIV;
        req->cmd_flags |= REQ_FAILFAST_DRIVER;
        req->__data_len = 0;
        req->__sector = (sector_t) -1;
        req->bio = req->biotail = NULL;

        req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        req->cmd = (unsigned char *)cmd;
        req->cmd_len = sizeof(struct nvme_command);
        req->special = (void *)0;

        if (buffer && bufflen) {
                ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
                if (ret)
                        goto out;
        } else if (ubuffer && bufflen) {
                ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
                                GFP_KERNEL);
                if (ret)
                        goto out;
                bio = req->bio;
        }

        blk_execute_rq(req->q, NULL, req, 0);
        if (bio)
                blk_rq_unmap_user(bio);
        if (result)
                *result = (u32)(uintptr_t)req->special;
        ret = req->errors;
 out:
        blk_mq_free_request(req);
        return ret;
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
{
        return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
}
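
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to act on the return convention documented above.  The helper
 * name nvme_example_check_status() is hypothetical; only
 * nvme_submit_sync_cmd() comes from this file.
 */
static int nvme_example_check_status(struct request_queue *q,
                struct nvme_command *cmd, void *buf, unsigned len)
{
        int ret = nvme_submit_sync_cmd(q, cmd, buf, len);

        if (ret < 0)            /* Linux error code, e.g. -ENOMEM */
                return ret;
        if (ret > 0)            /* NVM Express status code from the CQE */
                return -EIO;    /* map it to a generic I/O error */
        return 0;               /* command completed successfully */
}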

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
        struct nvme_command c = { };
        int error;

        /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
        c.identify.opcode = nvme_admin_identify;
        c.identify.cns = cpu_to_le32(1);

        *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
        if (!*id)
                return -ENOMEM;

        error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
                        sizeof(struct nvme_id_ctrl));
        if (error)
                kfree(*id);
        return error;
}
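
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of nvme_identify_ctrl().  Assumes a live struct nvme_ctrl whose admin_q is
 * set up; the mdts and oncs fields come from the Identify Controller data
 * structure defined in <linux/nvme.h>.
 */
static int nvme_example_read_ctrl_caps(struct nvme_ctrl *ctrl)
{
        struct nvme_id_ctrl *id;
        int ret;

        ret = nvme_identify_ctrl(ctrl, &id);
        if (ret)
                return ret < 0 ? ret : -EIO;

        /* mdts: max transfer size as a power of two of the minimum page size */
        pr_info("nvme: mdts=%u oncs=%#x\n", id->mdts, le16_to_cpu(id->oncs));

        kfree(id);              /* the caller owns the buffer on success */
        return 0;
}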

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
                struct nvme_id_ns **id)
{
        struct nvme_command c = { };
        int error;

        /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);

        *id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
        if (!*id)
                return -ENOMEM;

        error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
                        sizeof(struct nvme_id_ns));
        if (error)
                kfree(*id);
        return error;
}
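
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of nvme_identify_ns().  The nsze, flbas and lbaf fields come from the
 * Identify Namespace data structure in <linux/nvme.h>.
 */
static int nvme_example_read_ns_geometry(struct nvme_ctrl *ctrl, unsigned nsid)
{
        struct nvme_id_ns *id;
        int ret;

        ret = nvme_identify_ns(ctrl, nsid, &id);
        if (ret)
                return ret < 0 ? ret : -EIO;

        /* namespace size in logical blocks; LBA shift of the active format */
        pr_info("nvme: nsid %u: nsze=%llu lba_shift=%u\n", nsid,
                (unsigned long long)le64_to_cpu(id->nsze),
                id->lbaf[id->flbas & 0xf].ds);

        kfree(id);
        return 0;
}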

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                dma_addr_t dma_addr, u32 *result)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
        c.features.nsid = cpu_to_le32(nsid);
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);

        return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
                        result, 0);
}
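
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of nvme_get_features().  The Number of Queues feature (NVME_FEAT_NUM_QUEUES)
 * returns its data in the completion dword, so no data buffer is needed and
 * dma_addr can be 0.
 */
static int nvme_example_query_queue_count(struct nvme_ctrl *ctrl)
{
        u32 result;
        int ret;

        ret = nvme_get_features(ctrl, NVME_FEAT_NUM_QUEUES, 0, 0, &result);
        if (ret)
                return ret < 0 ? ret : -EIO;

        /* result: bits 15:0 = submission queues, 31:16 = completion queues,
         * both zero's based */
        pr_info("nvme: %u sq / %u cq allocated\n",
                (result & 0xffff) + 1, (result >> 16) + 1);
        return 0;
}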

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                dma_addr_t dma_addr, u32 *result)
{
        struct nvme_command c;

        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_set_features;
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
        c.features.dword11 = cpu_to_le32(dword11);

        return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, NULL, 0,
                        result, 0);
}
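
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of nvme_set_features() requesting I/O queues via NVME_FEAT_NUM_QUEUES.
 * dword11 packs the zero's based submission and completion queue counts; the
 * controller reports the number it actually granted in the result dword.
 */
static int nvme_example_request_queues(struct nvme_ctrl *ctrl, unsigned count)
{
        u32 q_count = (count - 1) | ((count - 1) << 16);
        u32 result;
        int ret;

        ret = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
                        &result);
        if (ret)
                return ret < 0 ? ret : -EIO;

        pr_info("nvme: granted %u sq / %u cq\n",
                (result & 0xffff) + 1, (result >> 16) + 1);
        return 0;
}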

int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
        struct nvme_command c = { };
        int error;

        c.common.opcode = nvme_admin_get_log_page;
        c.common.nsid = cpu_to_le32(0xFFFFFFFF);
        c.common.cdw10[0] = cpu_to_le32(
                        (((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
                        NVME_LOG_SMART);

        *log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
        if (!*log)
                return -ENOMEM;

        error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
                        sizeof(struct nvme_smart_log));
        if (error)
                kfree(*log);
        return error;
}
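
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of nvme_get_log_page().  The critical_warning and temperature fields come
 * from struct nvme_smart_log in <linux/nvme.h>; the composite temperature is
 * a little-endian 16-bit value in Kelvin.
 */
static int nvme_example_read_smart(struct nvme_ctrl *ctrl)
{
        struct nvme_smart_log *log;
        u16 temp;
        int ret;

        ret = nvme_get_log_page(ctrl, &log);
        if (ret)
                return ret < 0 ? ret : -EIO;

        temp = log->temperature[0] | (log->temperature[1] << 8);
        pr_info("nvme: critical_warning=%#x temperature=%uK\n",
                log->critical_warning, temp);

        kfree(log);
        return 0;
}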