/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED = -EINTR,
};

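/*
 * Editor's note (illustrative, not in the original header): hardware status
 * codes are non-negative 11-bit values (see the "status & 0x7ff" mask in
 * nvme_error_status() below), so code that sees a command's final status as
 * a signed integer can detect cancellation with a simple sign test, e.g.
 * "if (status < 0)".
 */
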
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

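/*
 * Usage sketch (editor's addition; "dev->tagset" is an illustrative
 * transport-private field): the module parameters above are expressed in
 * seconds, so multiplying by HZ yields jiffies, the unit the blk-mq timeout
 * machinery expects, e.g.:
 *
 *	dev->tagset.timeout = NVME_IO_TIMEOUT;
 */
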
enum {
	NVME_NS_LBA = 0,
	NVME_NS_LIGHTNVM = 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE = (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS = (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
};

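/*
 * Usage sketch (editor's addition; the call site is hypothetical): quirks
 * travel in nvme_ctrl.quirks as a bitmask, so drivers test them with a
 * bitwise AND, for example:
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
 */
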
struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	int cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	bool subsystem;
	unsigned long quirks;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	struct module *module;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	bool (*io_incapable)(struct nvme_ctrl *ctrl);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};

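/*
 * Implementation sketch (editor's addition; to_nvme_dev() and ->bar are
 * assumed transport-private helpers): a memory-mapped transport such as
 * PCIe can back the register accessors with plain MMIO reads, roughly:
 *
 *	static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off,
 *			u32 *val)
 *	{
 *		*val = readl(to_nvme_dev(ctrl)->bar + off);
 *		return 0;
 *	}
 */
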
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->io_incapable(ctrl))
		return true;
	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_CFS;
}

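/*
 * Writing the ASCII string "NVMe" (0x4E564D65) to the NSSR register
 * triggers an NVM Subsystem Reset, per the NVMe specification.
 */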
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

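/*
 * Worked example (editor's addition): the block layer counts in 512-byte
 * sectors, so for a namespace formatted with 4096-byte logical blocks
 * (lba_shift == 12), sector 80 maps to LBA 80 >> (12 - 9) == 10.
 */
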
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}

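/*
 * Note (editor's addition): the NLB field in an NVMe read/write command is
 * zero-based (0 means one logical block), which is why the length
 * calculation above subtracts 1 from the block count.
 */
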
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

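/*
 * Editor's note: NVME_SC_DNR is the "Do Not Retry" bit of the completion
 * status field; a command that failed with DNR set, or whose request has
 * already used up its timeout budget, must not be retried.
 */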
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout);
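/*
 * Usage sketch (editor's addition; allocation and error handling omitted,
 * "id" is an assumed caller-provided struct nvme_id_ctrl buffer): issuing
 * an admin command synchronously, here Identify Controller (CNS 1) on the
 * admin queue:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */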
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q,
		char *disk_name)
{
}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */