/* drivers/nvme/host/core.c — NVMe host core driver */
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
24 struct request
*nvme_alloc_request(struct request_queue
*q
,
25 struct nvme_command
*cmd
, unsigned int flags
)
27 bool write
= cmd
->common
.opcode
& 1;
30 req
= blk_mq_alloc_request(q
, write
, flags
);
34 req
->cmd_type
= REQ_TYPE_DRV_PRIV
;
35 req
->cmd_flags
|= REQ_FAILFAST_DRIVER
;
37 req
->__sector
= (sector_t
) -1;
38 req
->bio
= req
->biotail
= NULL
;
40 req
->cmd
= (unsigned char *)cmd
;
41 req
->cmd_len
= sizeof(struct nvme_command
);
42 req
->special
= (void *)0;
48 * Returns 0 on success. If the result is negative, it's a Linux error code;
49 * if the result is positive, it's an NVM Express status code
51 int __nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
52 void *buffer
, unsigned bufflen
, u32
*result
, unsigned timeout
)
57 req
= nvme_alloc_request(q
, cmd
, 0);
61 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
63 if (buffer
&& bufflen
) {
64 ret
= blk_rq_map_kern(q
, req
, buffer
, bufflen
, GFP_KERNEL
);
69 blk_execute_rq(req
->q
, NULL
, req
, 0);
71 *result
= (u32
)(uintptr_t)req
->special
;
74 blk_mq_free_request(req
);
78 int nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
79 void *buffer
, unsigned bufflen
)
81 return __nvme_submit_sync_cmd(q
, cmd
, buffer
, bufflen
, NULL
, 0);
84 int __nvme_submit_user_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
85 void __user
*ubuffer
, unsigned bufflen
,
86 void __user
*meta_buffer
, unsigned meta_len
, u32 meta_seed
,
87 u32
*result
, unsigned timeout
)
89 bool write
= cmd
->common
.opcode
& 1;
90 struct nvme_ns
*ns
= q
->queuedata
;
91 struct gendisk
*disk
= ns
? ns
->disk
: NULL
;
93 struct bio
*bio
= NULL
;
97 req
= nvme_alloc_request(q
, cmd
, 0);
101 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
103 if (ubuffer
&& bufflen
) {
104 ret
= blk_rq_map_user(q
, req
, NULL
, ubuffer
, bufflen
,
112 bio
->bi_bdev
= bdget_disk(disk
, 0);
119 struct bio_integrity_payload
*bip
;
121 meta
= kmalloc(meta_len
, GFP_KERNEL
);
128 if (copy_from_user(meta
, meta_buffer
,
135 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, 1);
141 bip
->bip_iter
.bi_size
= meta_len
;
142 bip
->bip_iter
.bi_sector
= meta_seed
;
144 ret
= bio_integrity_add_page(bio
, virt_to_page(meta
),
145 meta_len
, offset_in_page(meta
));
146 if (ret
!= meta_len
) {
153 blk_execute_rq(req
->q
, disk
, req
, 0);
156 *result
= (u32
)(uintptr_t)req
->special
;
157 if (meta
&& !ret
&& !write
) {
158 if (copy_to_user(meta_buffer
, meta
, meta_len
))
165 if (disk
&& bio
->bi_bdev
)
167 blk_rq_unmap_user(bio
);
170 blk_mq_free_request(req
);
174 int nvme_submit_user_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
175 void __user
*ubuffer
, unsigned bufflen
, u32
*result
,
178 return __nvme_submit_user_cmd(q
, cmd
, ubuffer
, bufflen
, NULL
, 0, 0,
182 int nvme_identify_ctrl(struct nvme_ctrl
*dev
, struct nvme_id_ctrl
**id
)
184 struct nvme_command c
= { };
187 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
188 c
.identify
.opcode
= nvme_admin_identify
;
189 c
.identify
.cns
= cpu_to_le32(1);
191 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
195 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
196 sizeof(struct nvme_id_ctrl
));
202 int nvme_identify_ns(struct nvme_ctrl
*dev
, unsigned nsid
,
203 struct nvme_id_ns
**id
)
205 struct nvme_command c
= { };
208 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
209 c
.identify
.opcode
= nvme_admin_identify
,
210 c
.identify
.nsid
= cpu_to_le32(nsid
),
212 *id
= kmalloc(sizeof(struct nvme_id_ns
), GFP_KERNEL
);
216 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
217 sizeof(struct nvme_id_ns
));
223 int nvme_get_features(struct nvme_ctrl
*dev
, unsigned fid
, unsigned nsid
,
224 dma_addr_t dma_addr
, u32
*result
)
226 struct nvme_command c
;
228 memset(&c
, 0, sizeof(c
));
229 c
.features
.opcode
= nvme_admin_get_features
;
230 c
.features
.nsid
= cpu_to_le32(nsid
);
231 c
.features
.prp1
= cpu_to_le64(dma_addr
);
232 c
.features
.fid
= cpu_to_le32(fid
);
234 return __nvme_submit_sync_cmd(dev
->admin_q
, &c
, NULL
, 0, result
, 0);
237 int nvme_set_features(struct nvme_ctrl
*dev
, unsigned fid
, unsigned dword11
,
238 dma_addr_t dma_addr
, u32
*result
)
240 struct nvme_command c
;
242 memset(&c
, 0, sizeof(c
));
243 c
.features
.opcode
= nvme_admin_set_features
;
244 c
.features
.prp1
= cpu_to_le64(dma_addr
);
245 c
.features
.fid
= cpu_to_le32(fid
);
246 c
.features
.dword11
= cpu_to_le32(dword11
);
248 return __nvme_submit_sync_cmd(dev
->admin_q
, &c
, NULL
, 0, result
, 0);
251 int nvme_get_log_page(struct nvme_ctrl
*dev
, struct nvme_smart_log
**log
)
253 struct nvme_command c
= { };
256 c
.common
.opcode
= nvme_admin_get_log_page
,
257 c
.common
.nsid
= cpu_to_le32(0xFFFFFFFF),
258 c
.common
.cdw10
[0] = cpu_to_le32(
259 (((sizeof(struct nvme_smart_log
) / 4) - 1) << 16) |
262 *log
= kmalloc(sizeof(struct nvme_smart_log
), GFP_KERNEL
);
266 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *log
,
267 sizeof(struct nvme_smart_log
));